diff --git a/.github/CONTRIBUTING.md b/CONTRIBUTING.md similarity index 100% rename from .github/CONTRIBUTING.md rename to CONTRIBUTING.md diff --git a/README.textile b/README.textile index 804f46a1811..be51570e271 100644 --- a/README.textile +++ b/README.textile @@ -147,7 +147,7 @@ curl -XPUT 'http://localhost:9200/kimchy/tweet/2' -d ' }' -The above will index information into the @kimchy@ index, with two types, @info@ and @tweet@. Each user will get his own special index. +The above will index information into the @kimchy@ index, with two types, @info@ and @tweet@. Each user will get their own special index. Complete control on the index level is allowed. As an example, in the above case, we would want to change from the default 5 shards with 1 replica per index, to only 1 shard with 1 replica per index (== per twitter user). Here is how this can be done (the configuration can be in yaml as well): diff --git a/Vagrantfile b/Vagrantfile index 454d114f1a2..4f8ee7164f6 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -23,15 +23,15 @@ Vagrant.configure(2) do |config| config.vm.define "ubuntu-1204" do |config| - config.vm.box = "ubuntu/precise64" + config.vm.box = "elastic/ubuntu-12.04-x86_64" ubuntu_common config end config.vm.define "ubuntu-1404" do |config| - config.vm.box = "ubuntu/trusty64" + config.vm.box = "elastic/ubuntu-14.04-x86_64" ubuntu_common config end config.vm.define "ubuntu-1504" do |config| - config.vm.box = "ubuntu/vivid64" + config.vm.box = "elastic/ubuntu-15.04-x86_64" ubuntu_common config, extra: <<-SHELL # Install Jayatana so we can work around it being present. [ -f /usr/share/java/jayatanaag.jar ] || install jayatana @@ -41,44 +41,35 @@ Vagrant.configure(2) do |config| # get the sun jdk on there just aren't worth it. We have jessie for testing # debian and it works fine. config.vm.define "debian-8" do |config| - config.vm.box = "debian/jessie64" - deb_common config, - 'echo deb http://http.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports' + config.vm.box = "elastic/debian-8-x86_64" + deb_common config, 'echo deb http://http.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports' end config.vm.define "centos-6" do |config| - config.vm.box = "boxcutter/centos67" + config.vm.box = "elastic/centos-6-x86_64" rpm_common config end config.vm.define "centos-7" do |config| - # There is a centos/7 box but it doesn't have rsync or virtualbox guest - # stuff on there so its slow to use. So chef it is.... - config.vm.box = "boxcutter/centos71" + config.vm.box = "elastic/centos-7-x86_64" + rpm_common config + end + config.vm.define "oel-6" do |config| + config.vm.box = "elastic/oraclelinux-6-x86_64" rpm_common config end - # This box hangs _forever_ on ```yum check-update```. I have no idea why. - # config.vm.define "oel-6", autostart: false do |config| - # config.vm.box = "boxcutter/oel66" - # rpm_common(config) - # end config.vm.define "oel-7" do |config| - config.vm.box = "boxcutter/oel70" + config.vm.box = "elastic/oraclelinux-7-x86_64" rpm_common config end config.vm.define "fedora-22" do |config| - # Fedora hosts their own 'cloud' images that aren't in Vagrant's Atlas but - # and are missing required stuff like rsync. It'd be nice if we could use - # them but they much slower to get up and running then the boxcutter image. 
- config.vm.box = "boxcutter/fedora22" + config.vm.box = "elastic/fedora-22-x86_64" dnf_common config end config.vm.define "opensuse-13" do |config| - config.vm.box = "chef/opensuse-13" - config.vm.box_url = "http://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_opensuse-13.2-x86_64_chef-provisionerless.box" + config.vm.box = "elastic/opensuse-13-x86_64" opensuse_common config end - # The SLES boxes are not considered to be highest quality, but seem to be sufficient for a test run config.vm.define "sles-12" do |config| - config.vm.box = "idar/sles12" + config.vm.box = "elastic/sles-12-x86_64" sles_common config end # Switch the default share for the project root from /vagrant to diff --git a/build.gradle b/build.gradle index b419bf01e15..6ab00d73881 100644 --- a/build.gradle +++ b/build.gradle @@ -116,6 +116,7 @@ subprojects { "org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:tar', "org.elasticsearch.distribution.rpm:elasticsearch:${version}": ':distribution:rpm', "org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:deb', + "org.elasticsearch.test:logger-usage:${version}": ':test:logger-usage', ] configurations.all { resolutionStrategy.dependencySubstitution { DependencySubstitutions subs -> diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index ca78157bcf2..598be546f26 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -307,6 +307,12 @@ class BuildPlugin implements Plugin { /** Adds repositores used by ES dependencies */ static void configureRepositories(Project project) { RepositoryHandler repos = project.repositories + if (System.getProperty("repos.mavenlocal") != null) { + // with -Drepos.mavenlocal=true we can force checking the local .m2 repo which is + // useful for development ie. 
bwc tests where we install stuff in the local repository + // such that we don't have to pass hardcoded files to gradle + repos.mavenLocal() + } repos.mavenCentral() repos.maven { name 'sonatype-snapshots' @@ -407,6 +413,7 @@ class BuildPlugin implements Plugin { systemProperty 'jna.nosys', 'true' // default test sysprop values systemProperty 'tests.ifNoTests', 'fail' + // TODO: remove setting logging level via system property systemProperty 'es.logger.level', 'WARN' for (Map.Entry property : System.properties.entrySet()) { if (property.getKey().startsWith('tests.') || diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesExtension.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesExtension.groovy index 7b949b3e1da..54699a52e76 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesExtension.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesExtension.groovy @@ -39,9 +39,6 @@ class PluginPropertiesExtension { @Input String classname - @Input - boolean isolated = true - PluginPropertiesExtension(Project project) { name = project.name version = project.version diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy index b5128817fb0..856c9255312 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy @@ -54,12 +54,6 @@ class PluginPropertiesTask extends Copy { if (extension.classname == null) { throw new InvalidUserDataException('classname is a required setting for esplugin') } - doFirst { - if (extension.isolated == false) { - String warning = "WARNING: Disabling plugin isolation in ${project.path} is deprecated and will be removed in the future" - logger.warn("${'=' * warning.length()}\n${warning}\n${'=' * warning.length()}") - } - } // configure property substitution from(templateFile) into(generatedResourcesDir) @@ -80,7 +74,6 @@ class PluginPropertiesTask extends Copy { 'version': stringSnap(extension.version), 'elasticsearchVersion': stringSnap(VersionProperties.elasticsearch), 'javaVersion': project.targetCompatibility as String, - 'isolated': extension.isolated as String, 'classname': extension.classname ] } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy new file mode 100644 index 00000000000..b280a74db58 --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy @@ -0,0 +1,98 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle.precommit + +import org.elasticsearch.gradle.LoggedExec +import org.gradle.api.file.FileCollection +import org.gradle.api.tasks.InputFiles +import org.gradle.api.tasks.OutputFile + +/** + * Runs LoggerUsageCheck on a set of directories. + */ +public class LoggerUsageTask extends LoggedExec { + + /** + * We use a simple "marker" file that we touch when the task succeeds + * as the task output. This is compared against the modified time of the + * inputs (ie the jars/class files). + */ + private File successMarker = new File(project.buildDir, 'markers/loggerUsage') + + private FileCollection classpath; + + private List classDirectories; + + public LoggerUsageTask() { + project.afterEvaluate { + dependsOn(classpath) + description = "Runs LoggerUsageCheck on ${classDirectories}" + executable = new File(project.javaHome, 'bin/java') + if (classDirectories == null) { + classDirectories = [] + if (project.sourceSets.findByName("main") && project.sourceSets.main.output.classesDir.exists()) { + classDirectories += [project.sourceSets.main.output.classesDir] + dependsOn project.tasks.classes + } + if (project.sourceSets.findByName("test") && project.sourceSets.test.output.classesDir.exists()) { + classDirectories += [project.sourceSets.test.output.classesDir] + dependsOn project.tasks.testClasses + } + } + doFirst({ + args('-cp', getClasspath().asPath, 'org.elasticsearch.test.loggerusage.ESLoggerUsageChecker') + getClassDirectories().each { + args it.getAbsolutePath() + } + }) + doLast({ + successMarker.parentFile.mkdirs() + successMarker.setText("", 'UTF-8') + }) + } + } + + @InputFiles + FileCollection getClasspath() { + return classpath + } + + void setClasspath(FileCollection classpath) { + this.classpath = classpath + } + + @InputFiles + List getClassDirectories() { + return classDirectories + } + + void setClassDirectories(List classDirectories) { + this.classDirectories = classDirectories + } + + @OutputFile + File getSuccessMarker() { + return successMarker + } + + void setSuccessMarker(File successMarker) { + this.successMarker = successMarker + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index ab524351274..0d4a51f050a 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -34,6 +34,7 @@ class PrecommitTasks { configureForbiddenApis(project), configureCheckstyle(project), configureNamingConventions(project), + configureLoggerUsage(project), project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class), project.tasks.create('licenseHeaders', LicenseHeadersTask.class), project.tasks.create('jarHell', JarHellTask.class), @@ -63,21 +64,21 @@ class PrecommitTasks { project.forbiddenApis { internalRuntimeForbidden = true failOnUnsupportedJava = false - bundledSignatures = ['jdk-unsafe', 'jdk-deprecated'] - signaturesURLs = [getClass().getResource('/forbidden/all-signatures.txt')] + bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-system-out'] + signaturesURLs = [getClass().getResource('/forbidden/jdk-signatures.txt'), + getClass().getResource('/forbidden/es-all-signatures.txt')] suppressAnnotations = ['**.SuppressForbidden'] } Task mainForbidden = 
project.tasks.findByName('forbiddenApisMain') if (mainForbidden != null) { mainForbidden.configure { - bundledSignatures += 'jdk-system-out' - signaturesURLs += getClass().getResource('/forbidden/core-signatures.txt') + signaturesURLs += getClass().getResource('/forbidden/es-core-signatures.txt') } } Task testForbidden = project.tasks.findByName('forbiddenApisTest') if (testForbidden != null) { testForbidden.configure { - signaturesURLs += getClass().getResource('/forbidden/test-signatures.txt') + signaturesURLs += getClass().getResource('/forbidden/es-test-signatures.txt') } } Task forbiddenApis = project.tasks.findByName('forbiddenApis') @@ -117,4 +118,18 @@ class PrecommitTasks { } return null } + + private static Task configureLoggerUsage(Project project) { + Task loggerUsageTask = project.tasks.create('loggerUsageCheck', LoggerUsageTask.class) + + project.configurations.create('loggerUsagePlugin') + project.dependencies.add('loggerUsagePlugin', + "org.elasticsearch.test:logger-usage:${org.elasticsearch.gradle.VersionProperties.elasticsearch}") + + loggerUsageTask.configure { + classpath = project.configurations.loggerUsagePlugin + } + + return loggerUsageTask + } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index c9db5657ba4..19b41cc8cde 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -23,8 +23,6 @@ import org.gradle.api.Project import org.gradle.api.file.FileCollection import org.gradle.api.tasks.Input -import java.time.LocalDateTime - /** Configuration for an elasticsearch cluster, used for integration tests. */ class ClusterConfiguration { @@ -34,6 +32,12 @@ class ClusterConfiguration { @Input int numNodes = 1 + @Input + int numBwcNodes = 0 + + @Input + String bwcVersion = null + @Input int httpPort = 0 @@ -49,6 +53,15 @@ class ClusterConfiguration { @Input String jvmArgs = System.getProperty('tests.jvm.argline', '') + /** + * The seed nodes port file. In the case the cluster has more than one node we use a seed node + * to form the cluster. The file is null if there is no seed node yet available. + * + * Note: this can only be null if the cluster has only one node or if the first node is not yet + * configured. All nodes but the first node should see a non null value. + */ + File seedNodePortsFile + /** * A closure to call before the cluster is considered ready. The closure is passed the node info, * as well as a groovy AntBuilder, to enable running ant condition checks. 
The default wait
@@ -119,4 +132,12 @@ class ClusterConfiguration {
         }
         extraConfigFiles.put(path, sourceFile)
     }
+
+    /** Returns an address and port suitable for a URI to connect to this cluster's seed node over the transport protocol */
+    String seedNodeTransportUri() {
+        if (seedNodePortsFile != null) {
+            return seedNodePortsFile.readLines("UTF-8").get(0)
+        }
+        return null;
+    }
 }
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
index d96ee511051..a82fefdc510 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
@@ -53,13 +53,59 @@ class ClusterFormationTasks {
             // no need to add cluster formation tasks if the task won't run!
             return
         }
-        configureDistributionDependency(project, config.distribution)
-        List<Task> startTasks = []
+        File sharedDir = new File(project.buildDir, "cluster/shared")
+        // first we remove everything in the shared cluster directory to ensure there are no leftovers in repos or anything
+        // in theory this should not be necessary but repositories are only deleted in the cluster-state and not on-disk
+        // such that snapshots survive failures / test runs and there is no simple way today to fix that.
+        Task cleanup = project.tasks.create(name: "${task.name}#prepareCluster.cleanShared", type: Delete, dependsOn: task.dependsOn.collect()) {
+            delete sharedDir
+            doLast {
+                sharedDir.mkdirs()
+            }
+        }
+        List<Task> startTasks = [cleanup]
         List<NodeInfo> nodes = []
+        if (config.numNodes < config.numBwcNodes) {
+            throw new GradleException("numNodes must be >= numBwcNodes [${config.numNodes} < ${config.numBwcNodes}]")
+        }
+        if (config.numBwcNodes > 0 && config.bwcVersion == null) {
+            throw new GradleException("bwcVersion must not be null if numBwcNodes is > 0")
+        }
+        // this is our current version distribution configuration we use for all kinds of REST tests etc.
+        project.configurations {
+            elasticsearchDistro
+        }
+        configureDistributionDependency(project, config.distribution, project.configurations.elasticsearchDistro, VersionProperties.elasticsearch)
+        if (config.bwcVersion != null && config.numBwcNodes > 0) {
+            // if the cluster also includes BWC nodes we need to configure a dependency on the BWC version
+            // this version uses the same distribution etc. and only differs in the version we depend on.
+            // from here on everything else works the same as if it's the current version, we fetch the BWC version
+            // from mirrors using gradle's built-in mechanism etc.
+            project.configurations {
+                elasticsearchBwcDistro
+            }
+            configureDistributionDependency(project, config.distribution, project.configurations.elasticsearchBwcDistro, config.bwcVersion)
+        }
+
         for (int i = 0; i < config.numNodes; ++i) {
-            NodeInfo node = new NodeInfo(config, i, project, task)
+            // we start N nodes and out of these N nodes there might be M bwc nodes.
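+            // (nodes with index i < numBwcNodes run the BWC distribution configured above; the rest run the current version)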
+            // for each of those nodes we might have a different configuration
+            String elasticsearchVersion = VersionProperties.elasticsearch
+            Configuration configuration = project.configurations.elasticsearchDistro
+            if (i < config.numBwcNodes) {
+                elasticsearchVersion = config.bwcVersion
+                configuration = project.configurations.elasticsearchBwcDistro
+            }
+            NodeInfo node = new NodeInfo(config, i, project, task, elasticsearchVersion, sharedDir)
+            if (i == 0) {
+                if (config.seedNodePortsFile != null) {
+                    // we might allow this in the future to be set but for now we are the only authority to set this!
+                    throw new GradleException("seedNodePortsFile has a non-null value but first node has not been initialized")
+                }
+                config.seedNodePortsFile = node.transportPortsFile;
+            }
             nodes.add(node)
-            startTasks.add(configureNode(project, task, node))
+            startTasks.add(configureNode(project, task, cleanup, node, configuration))
         }
         Task wait = configureWaitTask("${task.name}#wait", project, nodes, startTasks)
@@ -70,20 +116,14 @@ class ClusterFormationTasks {
     }

     /** Adds a dependency on the given distribution */
-    static void configureDistributionDependency(Project project, String distro) {
-        String elasticsearchVersion = VersionProperties.elasticsearch
+    static void configureDistributionDependency(Project project, String distro, Configuration configuration, String elasticsearchVersion) {
         String packaging = distro
         if (distro == 'tar') {
             packaging = 'tar.gz'
         } else if (distro == 'integ-test-zip') {
             packaging = 'zip'
         }
-        project.configurations {
-            elasticsearchDistro
-        }
-        project.dependencies {
-            elasticsearchDistro "org.elasticsearch.distribution.${distro}:elasticsearch:${elasticsearchVersion}@${packaging}"
-        }
+        project.dependencies.add(configuration.name, "org.elasticsearch.distribution.${distro}:elasticsearch:${elasticsearchVersion}@${packaging}")
     }

     /**
@@ -103,10 +143,10 @@ class ClusterFormationTasks {
     *
     * @return a task which starts the node.
*/ - static Task configureNode(Project project, Task task, NodeInfo node) { + static Task configureNode(Project project, Task task, Object dependsOn, NodeInfo node, Configuration configuration) { // tasks are chained so their execution order is maintained - Task setup = project.tasks.create(name: taskName(task, node, 'clean'), type: Delete, dependsOn: task.dependsOn.collect()) { + Task setup = project.tasks.create(name: taskName(task, node, 'clean'), type: Delete, dependsOn: dependsOn) { delete node.homeDir delete node.cwd doLast { @@ -115,7 +155,7 @@ class ClusterFormationTasks { } setup = configureCheckPreviousTask(taskName(task, node, 'checkPrevious'), project, setup, node) setup = configureStopTask(taskName(task, node, 'stopPrevious'), project, setup, node) - setup = configureExtractTask(taskName(task, node, 'extract'), project, setup, node) + setup = configureExtractTask(taskName(task, node, 'extract'), project, setup, node, configuration) setup = configureWriteConfigTask(taskName(task, node, 'configure'), project, setup, node) setup = configureExtraConfigFilesTask(taskName(task, node, 'extraConfig'), project, setup, node) setup = configureCopyPluginsTask(taskName(task, node, 'copyPlugins'), project, setup, node) @@ -151,27 +191,28 @@ class ClusterFormationTasks { } /** Adds a task to extract the elasticsearch distribution */ - static Task configureExtractTask(String name, Project project, Task setup, NodeInfo node) { - List extractDependsOn = [project.configurations.elasticsearchDistro, setup] - /* project.configurations.elasticsearchDistro.singleFile will be an - external artifact if this is being run by a plugin not living in the - elasticsearch source tree. If this is a plugin built in the - elasticsearch source tree or this is a distro in the elasticsearch - source tree then this should be the version of elasticsearch built - by the source tree. If it isn't then Bad Things(TM) will happen. */ + static Task configureExtractTask(String name, Project project, Task setup, NodeInfo node, Configuration configuration) { + List extractDependsOn = [configuration, setup] + /* configuration.singleFile will be an external artifact if this is being run by a plugin not living in the + elasticsearch source tree. If this is a plugin built in the elasticsearch source tree or this is a distro in + the elasticsearch source tree then this should be the version of elasticsearch built by the source tree. + If it isn't then Bad Things(TM) will happen. 
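+       (For a BWC node the same logic applies, except the configuration is elasticsearchBwcDistro and
+       singleFile resolves to the released distribution of config.bwcVersion.)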
*/ Task extract + switch (node.config.distribution) { case 'integ-test-zip': case 'zip': extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) { - from { project.zipTree(project.configurations.elasticsearchDistro.singleFile) } + from { + project.zipTree(configuration.singleFile) + } into node.baseDir } break; case 'tar': extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) { from { - project.tarTree(project.resources.gzip(project.configurations.elasticsearchDistro.singleFile)) + project.tarTree(project.resources.gzip(configuration.singleFile)) } into node.baseDir } @@ -180,7 +221,7 @@ class ClusterFormationTasks { File rpmDatabase = new File(node.baseDir, 'rpm-database') File rpmExtracted = new File(node.baseDir, 'rpm-extracted') /* Delay reading the location of the rpm file until task execution */ - Object rpm = "${ -> project.configurations.elasticsearchDistro.singleFile}" + Object rpm = "${ -> configuration.singleFile}" extract = project.tasks.create(name: name, type: LoggedExec, dependsOn: extractDependsOn) { commandLine 'rpm', '--badreloc', '--nodeps', '--noscripts', '--notriggers', '--dbpath', rpmDatabase, @@ -195,7 +236,7 @@ class ClusterFormationTasks { case 'deb': /* Delay reading the location of the deb file until task execution */ File debExtracted = new File(node.baseDir, 'deb-extracted') - Object deb = "${ -> project.configurations.elasticsearchDistro.singleFile}" + Object deb = "${ -> configuration.singleFile}" extract = project.tasks.create(name: name, type: LoggedExec, dependsOn: extractDependsOn) { commandLine 'dpkg-deb', '-x', deb, debExtracted doFirst { @@ -214,26 +255,28 @@ class ClusterFormationTasks { Map esConfig = [ 'cluster.name' : node.clusterName, 'pidfile' : node.pidFile, - 'path.repo' : "${node.homeDir}/repo", - 'path.shared_data' : "${node.homeDir}/../", + 'path.repo' : "${node.sharedDir}/repo", + 'path.shared_data' : "${node.sharedDir}/", // Define a node attribute so we can test that it exists 'node.testattr' : 'test', 'repositories.url.allowed_urls': 'http://snapshot.test*' ] - if (node.config.numNodes == 1) { - esConfig['http.port'] = node.config.httpPort - esConfig['transport.tcp.port'] = node.config.transportPort - } else { - // TODO: fix multi node so it doesn't use hardcoded prots - esConfig['http.port'] = 9400 + node.nodeNum - esConfig['transport.tcp.port'] = 9500 + node.nodeNum - esConfig['discovery.zen.ping.unicast.hosts'] = (0.. 0) { // multi-node cluster case, we have to wait for the seed node to startup + ant.waitfor(maxwait: '20', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond') { + resourceexists { + file(file: node.config.seedNodePortsFile.toString()) + } + } + // the seed node is enough to form the cluster - all subsequent nodes will get the seed node as a unicast + // host and join the cluster via that. 
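For illustration only, here is a sketch of a consuming build script that would exercise this multi-node BWC path. The `integTest` task name and the version literal are assumptions; `distribution`, `numNodes`, `numBwcNodes`, and `bwcVersion` are the inputs this change adds to ClusterConfiguration:

    // Hypothetical mixed-version test cluster: node 0 (a BWC node) writes its
    // transport port to the shared seed node ports file; nodes 1-3 wait for
    // that file and use the seed node's transport URI as their unicast host.
    integTest {
        cluster {
            distribution = 'zip'
            numNodes = 4
            numBwcNodes = 2      // must be <= numNodes, enforced with a GradleException
            bwcVersion = '2.3.0' // required whenever numBwcNodes > 0; resolved via elasticsearchBwcDistro
        }
    }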
+ esConfig['discovery.zen.ping.unicast.hosts'] = "\"${node.config.seedNodeTransportUri()}\"" + } File configFile = new File(node.confDir, 'elasticsearch.yml') logger.info("Configuring ${configFile}") configFile.setText(esConfig.collect { key, value -> "${key}: ${value}" }.join('\n'), 'UTF-8') diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy index b41b1822000..f68084c61fe 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy @@ -40,6 +40,9 @@ class NodeInfo { /** root directory all node files and operations happen under */ File baseDir + /** shared data directory all nodes share */ + File sharedDir + /** the pid file the node will use */ File pidFile @@ -89,14 +92,15 @@ class NodeInfo { ByteArrayOutputStream buffer = new ByteArrayOutputStream() /** Creates a node to run as part of a cluster for the given task */ - NodeInfo(ClusterConfiguration config, int nodeNum, Project project, Task task) { + NodeInfo(ClusterConfiguration config, int nodeNum, Project project, Task task, String nodeVersion, File sharedDir) { this.config = config this.nodeNum = nodeNum + this.sharedDir = sharedDir clusterName = "${task.path.replace(':', '_').substring(1)}" baseDir = new File(project.buildDir, "cluster/${task.name} node${nodeNum}") pidFile = new File(baseDir, 'es.pid') - homeDir = homeDir(baseDir, config.distribution) - confDir = confDir(baseDir, config.distribution) + homeDir = homeDir(baseDir, config.distribution, nodeVersion) + confDir = confDir(baseDir, config.distribution, nodeVersion) configFile = new File(confDir, 'elasticsearch.yml') // even for rpm/deb, the logs are under home because we dont start with real services File logsDir = new File(homeDir, 'logs') @@ -129,14 +133,15 @@ class NodeInfo { 'JAVA_HOME' : project.javaHome, 'ES_GC_OPTS': config.jvmArgs // we pass these with the undocumented gc opts so the argline can set gc, etc ] - args.add("-Des.node.portsfile=true") - args.addAll(config.systemProperties.collect { key, value -> "-D${key}=${value}" }) + args.addAll("-E", "es.node.portsfile=true") + env.put('ES_JAVA_OPTS', config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ")) for (Map.Entry property : System.properties.entrySet()) { if (property.getKey().startsWith('es.')) { - args.add("-D${property.getKey()}=${property.getValue()}") + args.add("-E") + args.add("${property.getKey()}=${property.getValue()}") } } - args.add("-Des.path.conf=${confDir}") + args.addAll("-E", "es.path.conf=${confDir}") if (Os.isFamily(Os.FAMILY_WINDOWS)) { args.add('"') // end the entire command, quoted } @@ -181,13 +186,13 @@ class NodeInfo { } /** Returns the directory elasticsearch home is contained in for the given distribution */ - static File homeDir(File baseDir, String distro) { + static File homeDir(File baseDir, String distro, String nodeVersion) { String path switch (distro) { case 'integ-test-zip': case 'zip': case 'tar': - path = "elasticsearch-${VersionProperties.elasticsearch}" + path = "elasticsearch-${nodeVersion}" break case 'rpm': case 'deb': @@ -199,12 +204,12 @@ class NodeInfo { return new File(baseDir, path) } - static File confDir(File baseDir, String distro) { + static File confDir(File baseDir, String distro, String nodeVersion) { switch (distro) { case 'integ-test-zip': case 'zip': case 'tar': - return new File(homeDir(baseDir, distro), 'config') + return 
new File(homeDir(baseDir, distro, nodeVersion), 'config')
            case 'rpm':
            case 'deb':
                return new File(baseDir, "${distro}-extracted/etc/elasticsearch")
diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml
index cbe612e5358..3bce1aece1d 100644
--- a/buildSrc/src/main/resources/checkstyle_suppressions.xml
+++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml
[many hunks deleting individual <suppress .../> entries; the XML elements themselves did not survive extraction]
diff --git a/buildSrc/src/main/resources/forbidden/es-all-signatures.txt b/buildSrc/src/main/resources/forbidden/es-all-signatures.txt
new file mode 100644
index 00000000000..d258c098911
--- /dev/null
+++ b/buildSrc/src/main/resources/forbidden/es-all-signatures.txt
@@ -0,0 +1,30 @@
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on
+# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+# either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+
+java.nio.file.Paths @ Use org.elasticsearch.common.io.PathUtils.get() instead.
+java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.common.io.PathUtils.getDefaultFileSystem() instead.
+
+java.nio.file.Files#getFileStore(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.getFileStore() instead, impacted by JDK-8034057
+java.nio.file.Files#isWritable(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.isWritable() instead, impacted by JDK-8034057
+
+@defaultMessage Use org.elasticsearch.common.Randomness#get for reproducible sources of randomness
+java.util.Random#<init>()
+java.util.concurrent.ThreadLocalRandom
+
+java.security.MessageDigest#clone() @ use org.elasticsearch.common.hash.MessageDigests
+
+@defaultMessage this should not have been added to lucene in the first place
+org.apache.lucene.index.IndexReader#getCombinedCoreAndDeletesKey()
diff --git a/buildSrc/src/main/resources/forbidden/core-signatures.txt b/buildSrc/src/main/resources/forbidden/es-core-signatures.txt
similarity index 100%
rename from buildSrc/src/main/resources/forbidden/core-signatures.txt
rename to buildSrc/src/main/resources/forbidden/es-core-signatures.txt
diff --git a/buildSrc/src/main/resources/forbidden/test-signatures.txt b/buildSrc/src/main/resources/forbidden/es-test-signatures.txt
similarity index 100%
rename from buildSrc/src/main/resources/forbidden/test-signatures.txt
rename to buildSrc/src/main/resources/forbidden/es-test-signatures.txt
diff --git a/buildSrc/src/main/resources/forbidden/all-signatures.txt b/buildSrc/src/main/resources/forbidden/jdk-signatures.txt
similarity index 85%
rename from buildSrc/src/main/resources/forbidden/all-signatures.txt
rename to buildSrc/src/main/resources/forbidden/jdk-signatures.txt
index 9bc37005514..994b1ad3a4a 100644
--- a/buildSrc/src/main/resources/forbidden/all-signatures.txt
+++ b/buildSrc/src/main/resources/forbidden/jdk-signatures.txt
@@ -33,9 +33,6 @@ java.util.Formatter#<init>(java.lang.String,java.lang.String,java.util.Locale)
 java.io.RandomAccessFile
 java.nio.file.Path#toFile()
-java.nio.file.Paths @ Use org.elasticsearch.common.io.PathUtils.get() instead.
-java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.common.io.PathUtils.getDefaultFileSystem() instead.
-
 @defaultMessage Specify a location for the temp file/directory instead.
 java.nio.file.Files#createTempDirectory(java.lang.String,java.nio.file.attribute.FileAttribute[])
 java.nio.file.Files#createTempFile(java.lang.String,java.lang.String,java.nio.file.attribute.FileAttribute[])
@@ -48,9 +45,6 @@ java.io.ObjectInput
 java.nio.file.Files#isHidden(java.nio.file.Path) @ Dependent on the operating system, use FileSystemUtils.isHidden instead
-java.nio.file.Files#getFileStore(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.getFileStore() instead, impacted by JDK-8034057
-java.nio.file.Files#isWritable(java.nio.file.Path) @ Use org.elasticsearch.env.Environment.isWritable() instead, impacted by JDK-8034057
-
 @defaultMessage Resolve hosts explicitly to the address(es) you want with InetAddress.
java.net.InetSocketAddress#<init>(java.lang.String,int)
java.net.Socket#<init>(java.lang.String,int)
@@ -89,9 +83,6 @@ java.lang.Class#getDeclaredMethods() @ Do not violate java's access system: Use
 java.lang.reflect.AccessibleObject#setAccessible(boolean)
 java.lang.reflect.AccessibleObject#setAccessible(java.lang.reflect.AccessibleObject[], boolean)
-@defaultMessage this should not have been added to lucene in the first place
-org.apache.lucene.index.IndexReader#getCombinedCoreAndDeletesKey()
-
 @defaultMessage this method needs special permission
 java.lang.Thread#getAllStackTraces()
@@ -112,8 +103,3 @@ java.util.Collections#EMPTY_MAP
 java.util.Collections#EMPTY_SET
 java.util.Collections#shuffle(java.util.List) @ Use java.util.Collections#shuffle(java.util.List, java.util.Random) with a reproducible source of randomness
-@defaultMessage Use org.elasticsearch.common.Randomness#get for reproducible sources of randomness
-java.util.Random#<init>()
-java.util.concurrent.ThreadLocalRandom
-
-java.security.MessageDigest#clone() @ use org.elasticsearch.common.hash.MessageDigests
diff --git a/buildSrc/src/main/resources/plugin-descriptor.properties b/buildSrc/src/main/resources/plugin-descriptor.properties
index 7659f11bd09..ebde46d326b 100644
--- a/buildSrc/src/main/resources/plugin-descriptor.properties
+++ b/buildSrc/src/main/resources/plugin-descriptor.properties
@@ -38,12 +38,3 @@ java.version=${javaVersion}
 #
 # 'elasticsearch.version' version of elasticsearch compiled against
 elasticsearch.version=${elasticsearchVersion}
-#
-### deprecated elements for jvm plugins :
-#
-# 'isolated': true if the plugin should have its own classloader.
-# passing false is deprecated, and only intended to support plugins
-# that have hard dependencies against each other. If this is
-# not specified, then the plugin is isolated by default.
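With `isolated` gone from both `PluginPropertiesExtension` and this descriptor template, a plugin build script is left with only the remaining settings. A minimal sketch (the description and classname values here are hypothetical; per the buildSrc change above, `classname` is still validated as required):

    esplugin {
        description 'An example plugin'        // hypothetical value
        classname 'org.example.ExamplePlugin'  // still required by PluginPropertiesTask
    }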
-isolated=${isolated} -# \ No newline at end of file diff --git a/buildSrc/version.properties b/buildSrc/version.properties index f75d5a936bb..13c0885a61a 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ -elasticsearch = 5.0.0 -lucene = 6.0.0-snapshot-bea235f +elasticsearch = 5.0.0-alpha1 +lucene = 6.0.0-snapshot-f0aa4fc # optional dependencies spatial4j = 0.6 diff --git a/core/build.gradle b/core/build.gradle index 39c1e4367c0..ab3754e72ff 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -49,7 +49,7 @@ dependencies { compile 'org.elasticsearch:securesm:1.0' // utilities - compile 'commons-cli:commons-cli:1.3.1' + compile 'net.sf.jopt-simple:jopt-simple:4.9' compile 'com.carrotsearch:hppc:0.7.1' // time handling, remove with java 8 time diff --git a/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java index 4e24944ffac..564f780b8ed 100644 --- a/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java +++ b/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java @@ -237,6 +237,10 @@ public abstract class BlendedTermQuery extends Query { return newCtx; } + public List getTerms() { + return Arrays.asList(terms); + } + @Override public String toString(String field) { StringBuilder builder = new StringBuilder("blended(terms:["); diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java index a7c53a56bc4..6ddd7591caa 100644 --- a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java +++ b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java @@ -787,8 +787,9 @@ public class MapperQueryParser extends QueryParser { assert q instanceof BoostQuery == false; return pq; } else if (q instanceof MultiPhraseQuery) { - ((MultiPhraseQuery) q).setSlop(slop); - return q; + MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder((MultiPhraseQuery) q); + builder.setSlop(slop); + return builder.build(); } else { return q; } diff --git a/core/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java b/core/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java index 089b649cefe..3c0bda97347 100644 --- a/core/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java +++ b/core/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java @@ -34,7 +34,6 @@ import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import java.io.IOException; import java.util.Collection; -import java.util.List; /** * @@ -68,7 +67,7 @@ public class CustomFieldQuery extends FieldQuery { flatten(((FiltersFunctionScoreQuery) sourceQuery).getSubQuery(), reader, flatQueries, boost); } else if (sourceQuery instanceof MultiPhraseQuery) { MultiPhraseQuery q = ((MultiPhraseQuery) sourceQuery); - convertMultiPhraseQuery(0, new int[q.getTermArrays().size()], q, q.getTermArrays(), q.getPositions(), reader, flatQueries); + convertMultiPhraseQuery(0, new int[q.getTermArrays().length], q, q.getTermArrays(), q.getPositions(), reader, flatQueries); } else if (sourceQuery instanceof BlendedTermQuery) { final BlendedTermQuery blendedTermQuery = (BlendedTermQuery) sourceQuery; flatten(blendedTermQuery.rewrite(reader), reader, flatQueries, boost); @@ -77,7 +76,7 @@ public class CustomFieldQuery extends FieldQuery { } } - private void 
convertMultiPhraseQuery(int currentPos, int[] termsIdx, MultiPhraseQuery orig, List terms, int[] pos, IndexReader reader, Collection flatQueries) throws IOException { + private void convertMultiPhraseQuery(int currentPos, int[] termsIdx, MultiPhraseQuery orig, Term[][] terms, int[] pos, IndexReader reader, Collection flatQueries) throws IOException { if (currentPos == 0) { // if we have more than 16 terms int numTerms = 0; @@ -97,16 +96,16 @@ public class CustomFieldQuery extends FieldQuery { * we walk all possible ways and for each path down the MPQ we create a PhraseQuery this is what FieldQuery supports. * It seems expensive but most queries will pretty small. */ - if (currentPos == terms.size()) { + if (currentPos == terms.length) { PhraseQuery.Builder queryBuilder = new PhraseQuery.Builder(); queryBuilder.setSlop(orig.getSlop()); for (int i = 0; i < termsIdx.length; i++) { - queryBuilder.add(terms.get(i)[termsIdx[i]], pos[i]); + queryBuilder.add(terms[i][termsIdx[i]], pos[i]); } Query query = queryBuilder.build(); this.flatten(query, reader, flatQueries, 1F); } else { - Term[] t = terms.get(currentPos); + Term[] t = terms[currentPos]; for (int i = 0; i < t.length; i++) { termsIdx[currentPos] = i; convertMultiPhraseQuery(currentPos+1, termsIdx, orig, terms, pos, reader, flatQueries); diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchException.java b/core/src/main/java/org/elasticsearch/ElasticsearchException.java index d069bddfdfe..7fd81f5ddfe 100644 --- a/core/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/core/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -645,8 +645,6 @@ public class ElasticsearchException extends RuntimeException implements ToXConte // 87 used to be for MergeMappingException INVALID_INDEX_TEMPLATE_EXCEPTION(org.elasticsearch.indices.InvalidIndexTemplateException.class, org.elasticsearch.indices.InvalidIndexTemplateException::new, 88), - PERCOLATE_EXCEPTION(org.elasticsearch.percolator.PercolateException.class, - org.elasticsearch.percolator.PercolateException::new, 89), REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class, org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90), AGGREGATION_INITIALIZATION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationInitializationException.class, diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index eeb4825cb90..fe794d96661 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -60,11 +60,13 @@ public class Version { public static final Version V_2_1_2 = new Version(V_2_1_2_ID, org.apache.lucene.util.Version.LUCENE_5_3_1); public static final int V_2_2_0_ID = 2020099; public static final Version V_2_2_0 = new Version(V_2_2_0_ID, org.apache.lucene.util.Version.LUCENE_5_4_1); + public static final int V_2_2_1_ID = 2020199; + public static final Version V_2_2_1 = new Version(V_2_2_1_ID, org.apache.lucene.util.Version.LUCENE_5_4_1); public static final int V_2_3_0_ID = 2030099; public static final Version V_2_3_0 = new Version(V_2_3_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); - public static final int V_5_0_0_ID = 5000099; - public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_0_0); - public static final Version CURRENT = V_5_0_0; + public static final int V_5_0_0_alpha1_ID = 5000001; + public static final Version V_5_0_0_alpha1 
= new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0); + public static final Version CURRENT = V_5_0_0_alpha1; static { assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to [" @@ -77,10 +79,12 @@ public class Version { public static Version fromId(int id) { switch (id) { - case V_5_0_0_ID: - return V_5_0_0; + case V_5_0_0_alpha1_ID: + return V_5_0_0_alpha1; case V_2_3_0_ID: return V_2_3_0; + case V_2_2_1_ID: + return V_2_2_1; case V_2_2_0_ID: return V_2_2_0; case V_2_1_2_ID: diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java index 491202e7c7a..2b33a669428 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java @@ -165,7 +165,6 @@ import org.elasticsearch.action.percolate.MultiPercolateAction; import org.elasticsearch.action.percolate.PercolateAction; import org.elasticsearch.action.percolate.TransportMultiPercolateAction; import org.elasticsearch.action.percolate.TransportPercolateAction; -import org.elasticsearch.action.percolate.TransportShardMultiPercolateAction; import org.elasticsearch.action.search.ClearScrollAction; import org.elasticsearch.action.search.MultiSearchAction; import org.elasticsearch.action.search.SearchAction; @@ -174,8 +173,6 @@ import org.elasticsearch.action.search.TransportClearScrollAction; import org.elasticsearch.action.search.TransportMultiSearchAction; import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.search.TransportSearchScrollAction; -import org.elasticsearch.action.suggest.SuggestAction; -import org.elasticsearch.action.suggest.TransportSuggestAction; import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; @@ -321,7 +318,6 @@ public class ActionModule extends AbstractModule { registerAction(MultiTermVectorsAction.INSTANCE, TransportMultiTermVectorsAction.class, TransportShardMultiTermsVectorAction.class); registerAction(DeleteAction.INSTANCE, TransportDeleteAction.class); - registerAction(SuggestAction.INSTANCE, TransportSuggestAction.class); registerAction(UpdateAction.INSTANCE, TransportUpdateAction.class); registerAction(MultiGetAction.INSTANCE, TransportMultiGetAction.class, TransportShardMultiGetAction.class); @@ -331,7 +327,7 @@ public class ActionModule extends AbstractModule { registerAction(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class); registerAction(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class); registerAction(PercolateAction.INSTANCE, TransportPercolateAction.class); - registerAction(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class, TransportShardMultiPercolateAction.class); + registerAction(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class); registerAction(ExplainAction.INSTANCE, TransportExplainAction.class); registerAction(ClearScrollAction.INSTANCE, TransportClearScrollAction.class); registerAction(RecoveryAction.INSTANCE, TransportRecoveryAction.class); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index b5c9577aff7..069f0ebe1b8 100644 --- 
a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -24,7 +24,6 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.ClusterStateUpdateTask; @@ -32,6 +31,7 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -213,7 +213,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction< } if (request.indices() != null && request.indices().length > 0) { try { - indexNameExpressionResolver.concreteIndices(clusterState, IndicesOptions.strictExpand(), request.indices()); + indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.strictExpand(), request.indices()); waitForCounter++; } catch (IndexNotFoundException e) { response.setStatus(ClusterHealthStatus.RED); // no indices, make sure its RED @@ -280,7 +280,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction< String[] concreteIndices; try { - concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request); + concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request); } catch (IndexNotFoundException e) { // one of the specified indices is not there - treat it as RED. 
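                // (concreteIndexNames is the new name for concreteIndices; the rename is applied consistently in this change)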
ClusterHealthResponse response = new ClusterHealthResponse(clusterName.value(), Strings.EMPTY_ARRAY, clusterState, diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java index c743a1d2a91..d53f651da45 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java @@ -24,8 +24,8 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.BaseNodeRequest; import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java index 11c542863b5..87ec2d052ab 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java @@ -231,8 +231,7 @@ public class NodeInfo extends BaseNodeResponse { plugins.readFrom(in); } if (in.readBoolean()) { - ingest = new IngestInfo(); - ingest.readFrom(in); + ingest = new IngestInfo(in); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java index b14450f9eb1..f52729faa4f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java @@ -23,8 +23,8 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.BaseNodeRequest; import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/liveness/TransportLivenessAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/liveness/TransportLivenessAction.java index 65913bc4b28..8b29b9379ac 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/liveness/TransportLivenessAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/liveness/TransportLivenessAction.java @@ -20,7 +20,7 @@ package org.elasticsearch.action.admin.cluster.node.liveness; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.service.ClusterService; 
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java index a4cf2b1de2a..442b5edde77 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java @@ -31,6 +31,7 @@ import org.elasticsearch.discovery.DiscoveryStats; import org.elasticsearch.http.HttpStats; import org.elasticsearch.indices.NodeIndicesStats; import org.elasticsearch.indices.breaker.AllCircuitBreakerStats; +import org.elasticsearch.ingest.IngestStats; import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.monitor.jvm.JvmStats; import org.elasticsearch.monitor.os.OsStats; @@ -81,6 +82,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContent { @Nullable private DiscoveryStats discoveryStats; + @Nullable + private IngestStats ingestStats; + NodeStats() { } @@ -89,7 +93,8 @@ public class NodeStats extends BaseNodeResponse implements ToXContent { @Nullable FsInfo fs, @Nullable TransportStats transport, @Nullable HttpStats http, @Nullable AllCircuitBreakerStats breaker, @Nullable ScriptStats scriptStats, - @Nullable DiscoveryStats discoveryStats) { + @Nullable DiscoveryStats discoveryStats, + @Nullable IngestStats ingestStats) { super(node); this.timestamp = timestamp; this.indices = indices; @@ -103,6 +108,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent { this.breaker = breaker; this.scriptStats = scriptStats; this.discoveryStats = discoveryStats; + this.ingestStats = ingestStats; } public long getTimestamp() { @@ -187,6 +193,11 @@ public class NodeStats extends BaseNodeResponse implements ToXContent { return this.discoveryStats; } + @Nullable + public IngestStats getIngestStats() { + return ingestStats; + } + public static NodeStats readNodeStats(StreamInput in) throws IOException { NodeStats nodeInfo = new NodeStats(); nodeInfo.readFrom(in); @@ -224,7 +235,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent { breaker = AllCircuitBreakerStats.readOptionalAllCircuitBreakerStats(in); scriptStats = in.readOptionalStreamable(ScriptStats::new); discoveryStats = in.readOptionalStreamable(() -> new DiscoveryStats(null)); - + ingestStats = in.readOptionalWriteable(IngestStats::new); } @Override @@ -282,6 +293,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent { out.writeOptionalStreamable(breaker); out.writeOptionalStreamable(scriptStats); out.writeOptionalStreamable(discoveryStats); + out.writeOptionalWriteable(ingestStats); } @Override @@ -337,6 +349,10 @@ public class NodeStats extends BaseNodeResponse implements ToXContent { getDiscoveryStats().toXContent(builder, params); } + if (getIngestStats() != null) { + getIngestStats().toXContent(builder, params); + } + return builder; } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java index 5916421c1ed..88162a617a8 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -42,6 +42,7 @@ public class 
NodesStatsRequest extends BaseNodesRequest { private boolean breaker; private boolean script; private boolean discovery; + private boolean ingest; public NodesStatsRequest() { } @@ -69,6 +70,7 @@ public class NodesStatsRequest extends BaseNodesRequest { this.breaker = true; this.script = true; this.discovery = true; + this.ingest = true; return this; } @@ -87,6 +89,7 @@ public class NodesStatsRequest extends BaseNodesRequest { this.breaker = false; this.script = false; this.discovery = false; + this.ingest = false; return this; } @@ -250,6 +253,17 @@ public class NodesStatsRequest extends BaseNodesRequest { return this; } + public boolean ingest() { + return ingest; + } + + /** + * Should ingest statistics be returned. + */ + public NodesStatsRequest ingest(boolean ingest) { + this.ingest = ingest; + return this; + } @Override public void readFrom(StreamInput in) throws IOException { @@ -265,6 +279,7 @@ public class NodesStatsRequest extends BaseNodesRequest { breaker = in.readBoolean(); script = in.readBoolean(); discovery = in.readBoolean(); + ingest = in.readBoolean(); } @Override @@ -281,6 +296,7 @@ public class NodesStatsRequest extends BaseNodesRequest { out.writeBoolean(breaker); out.writeBoolean(script); out.writeBoolean(discovery); + out.writeBoolean(ingest); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java index dc35eefee7d..027e6122681 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java @@ -137,4 +137,12 @@ public class NodesStatsRequestBuilder extends NodesOperationRequestBuilder { return super.match(task) && task instanceof CancellableTask; } - public CancelTasksRequest reason(String reason) { + /** + * Set the reason for canceling the task. + */ + public CancelTasksRequest setReason(String reason) { this.reason = reason; return this; } - public String reason() { + /** + * The reason for canceling the task. 
+ /** + * The reason for canceling the task. + */ + public String getReason() { return reason; } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java index b07e540d792..9dbe4ee1aeb 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java @@ -26,10 +26,10 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.tasks.TransportTasksAction; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -84,21 +84,21 @@ public class TransportCancelTasksAction extends TransportTasksAction<CancellableTask, CancelTasksRequest, CancelTasksResponse, TaskInfo> protected void processTasks(CancelTasksRequest request, Consumer<CancellableTask> operation) { - if (request.taskId().isSet() == false) { + if (request.getTaskId().isSet()) { // we are only checking one task, we can optimize it - CancellableTask task = taskManager.getCancellableTask(request.taskId().getId()); + CancellableTask task = taskManager.getCancellableTask(request.getTaskId().getId()); if (task != null) { if (request.match(task)) { operation.accept(task); } else { - throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support this operation"); + throw new IllegalArgumentException("task [" + request.getTaskId() + "] doesn't support this operation"); } } else { - if (taskManager.getTask(request.taskId().getId()) != null) { + if (taskManager.getTask(request.getTaskId().getId()) != null) { // The task exists, but doesn't support cancellation - throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support cancellation"); + throw new IllegalArgumentException("task [" + request.getTaskId() + "] doesn't support cancellation"); } else { - throw new ResourceNotFoundException("task [{}] doesn't support cancellation", request.taskId()); + throw new ResourceNotFoundException("task [{}] doesn't support cancellation", request.getTaskId()); } } } else { @@ -113,14 +113,14 @@ public class TransportCancelTasksAction extends TransportTasksAction<CancellableTask, CancelTasksRequest, CancelTasksResponse, TaskInfo> BanLock banLock = new BanLock(nodes -> removeBanOnNodes(cancellableTask, nodes)); - Set<DiscoveryNode> childNodes = taskManager.cancel(cancellableTask, request.reason(), banLock::onTaskFinished); + Set<DiscoveryNode> childNodes = taskManager.cancel(cancellableTask, request.getReason(), banLock::onTaskFinished); if (childNodes != null) { if (childNodes.isEmpty()) { logger.trace("cancelling task {} with no children", cancellableTask.getId()); return cancellableTask.taskInfo(clusterService.localNode(), false); } else { logger.trace("cancelling task {} with children on nodes [{}]", cancellableTask.getId(), childNodes); - setBanOnNodes(request.reason(), cancellableTask, childNodes, banLock); + setBanOnNodes(request.getReason(), cancellableTask, childNodes, banLock); return cancellableTask.taskInfo(clusterService.localNode(), false); } } else { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java
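The cancel transport action now takes two paths: a direct lookup when the request pins a single task id, and a broadcast scan over every known task otherwise. An illustrative sketch of that dispatch shape, using plain Java stand-ins rather than the ES task manager:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Consumer;

// Not the ES classes: a null id stands in for "no task id set" where the
// real code checks request.getTaskId().isSet().
final class TaskDispatchDemo {
    static final Map<Long, Runnable> tasks = new ConcurrentHashMap<>();

    static void processTasks(Long requestedId, Consumer<Runnable> operation) {
        if (requestedId != null) {
            Runnable task = tasks.get(requestedId);     // single-task fast path
            if (task == null) {
                throw new IllegalArgumentException("task [" + requestedId + "] not found");
            }
            operation.accept(task);
        } else {
            tasks.values().forEach(operation);          // broadcast path
        }
    }

    public static void main(String[] args) {
        tasks.put(7L, () -> { });
        processTasks(7L, task -> System.out.println("cancelling one task"));
        processTasks(null, task -> System.out.println("cancelling every task"));
    }
}
```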
b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java index 6bf8ac3e1ef..3fe743fc36a 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java @@ -31,31 +31,49 @@ import java.io.IOException; public class ListTasksRequest extends BaseTasksRequest<ListTasksRequest> { private boolean detailed = false; + private boolean waitForCompletion = false; /** * Should the detailed task information be returned. */ - public boolean detailed() { + public boolean getDetailed() { return this.detailed; } /** * Should the detailed task information be returned. */ - public ListTasksRequest detailed(boolean detailed) { + public ListTasksRequest setDetailed(boolean detailed) { this.detailed = detailed; return this; } + /** + * Should this request wait for all found tasks to complete? + */ + public boolean getWaitForCompletion() { + return waitForCompletion; + } + + /** + * Should this request wait for all found tasks to complete? + */ + public ListTasksRequest setWaitForCompletion(boolean waitForCompletion) { + this.waitForCompletion = waitForCompletion; + return this; + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); detailed = in.readBoolean(); + waitForCompletion = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeBoolean(detailed); + out.writeBoolean(waitForCompletion); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java index 2b462014f43..1385781125a 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequestBuilder.java @@ -35,7 +35,15 @@ public class ListTasksRequestBuilder extends TasksRequestBuilder<ListTasksRequest, ListTasksResponse, ListTasksRequestBuilder> { request.setDetailed(detailed); return this; } + + /** + * Should this request wait for all found tasks to complete? + */ + public ListTasksRequestBuilder setWaitForCompletion(boolean waitForCompletion) { + request.setWaitForCompletion(waitForCompletion); + return this; + } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TaskInfo.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TaskInfo.java --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TaskInfo.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TaskInfo.java @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent { } builder.dateValueField("start_time_in_millis", "start_time", startTime); builder.timeValueField("running_time_in_nanos", "running_time", runningTimeNanos, TimeUnit.NANOSECONDS); - if (parentTaskId.isSet() == false) { + if (parentTaskId.isSet()) { builder.field("parent_task_id", parentTaskId.toString()); } return builder; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java index 40f8894d3be..e6ea002a794 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java @@ -19,28 +19,36 @@ package org.elasticsearch.action.admin.cluster.node.tasks.list; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.tasks.TransportTasksAction; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
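A hedged usage sketch of the new flag, assuming only the request API added in this diff (client and response handling omitted):

```java
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;

public class WaitForTasksDemo {
    public static void main(String[] args) {
        // setWaitForCompletion pairs with the polling loop added to
        // TransportListTasksAction further down in this diff: the server
        // blocks until each matching task leaves the task manager.
        ListTasksRequest request = new ListTasksRequest()
            .setDetailed(true)            // include per-task detail
            .setWaitForCompletion(true);  // poll until tasks finish
        System.out.println(request.getWaitForCompletion());
    }
}
```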
+import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.Collection; import java.util.List; +import java.util.function.Consumer; + +import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; +import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; /** * */ public class TransportListTasksAction extends TransportTasksAction<Task, ListTasksRequest, ListTasksResponse, TaskInfo> { + private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100); + private static final TimeValue DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT = timeValueSeconds(30); @Inject public TransportListTasksAction(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { @@ -59,7 +67,40 @@ public class TransportListTasksAction extends TransportTasksAction<Task, ListTasksRequest, ListTasksResponse, TaskInfo> + @Override + protected void processTasks(ListTasksRequest request, Consumer<Task> operation) { + if (false == request.getWaitForCompletion()) { + super.processTasks(request, operation); + return; + } + // If we should wait for completion then we have to intercept every found task and wait for it to leave the manager. + TimeValue timeout = request.getTimeout(); + if (timeout == null) { + timeout = DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT; + } + long timeoutTime = System.nanoTime() + timeout.nanos(); + super.processTasks(request, operation.andThen((Task t) -> { + while (System.nanoTime() - timeoutTime < 0) { + Task task = taskManager.getTask(t.getId()); + if (task == null) { + return; + } + if (task.getAction().startsWith(ListTasksAction.NAME)) { + // It doesn't make sense to wait for List Tasks and it can cause an infinite loop of the task waiting + // for itself or one of its child tasks + return; + } + try { + Thread.sleep(WAIT_FOR_COMPLETION_POLL.millis()); + } catch (InterruptedException e) { + throw new ElasticsearchException("Interrupted waiting for completion of [{}]", e, t); + } + } + throw new ElasticsearchTimeoutException("Timed out waiting for completion of [{}]", t); + })); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java index 79e51f9a46e..a17d2aac892 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java @@ -22,12 +22,12 @@ package org.elasticsearch.action.admin.cluster.repositories.delete; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import 
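The loop above compares System.nanoTime() by subtraction rather than with a direct less-than, which stays correct even if the nanosecond counter wraps around. A stand-alone sketch of the same deadline-polling shape, with illustrative names rather than the ES classes:

```java
import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

// Generic deadline poll: sleep in small increments until the condition
// holds or the deadline passes, then fail loudly. The subtraction in the
// loop condition mirrors `System.nanoTime() - timeoutTime < 0` above.
final class DeadlinePollDemo {
    static void pollUntil(BooleanSupplier done, long timeoutMillis) throws InterruptedException {
        long timeoutTime = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMillis);
        while (System.nanoTime() - timeoutTime < 0) {
            if (done.getAsBoolean()) {
                return;                   // condition met before the deadline
            }
            Thread.sleep(100);            // WAIT_FOR_COMPLETION_POLL equivalent
        }
        throw new IllegalStateException("timed out waiting for completion");
    }

    public static void main(String[] args) throws InterruptedException {
        long start = System.currentTimeMillis();
        pollUntil(() -> System.currentTimeMillis() - start > 300, 5000);
        System.out.println("task finished within the deadline");
    }
}
```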
org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.RepositoriesService; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java index 39d9cacbda3..490d20f086c 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java @@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.cluster.repositories.get; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -30,6 +29,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoriesMetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java index efc45f16cbd..d1639001352 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java @@ -22,12 +22,12 @@ package org.elasticsearch.action.admin.cluster.repositories.put; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.RepositoriesService; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java index 8b1d9816004..2c75335dcaa 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java @@ -23,11 +23,11 @@ import 
org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.RepositoriesService; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java index d7ec84fb7a5..e6116dbfbc4 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -31,6 +30,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingExplanations; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 75f94921e61..60a0e7a8046 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -33,6 +32,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import 
org.elasticsearch.common.inject.Inject; diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java index 9f3ccac8f64..1b329d17289 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsGroup.java @@ -67,7 +67,7 @@ public class ClusterSearchShardsGroup implements Streamable, ToXContent { @Override public void readFrom(StreamInput in) throws IOException { - index = Index.readIndex(in); + index = new Index(in); shardId = in.readVInt(); shards = new ShardRouting[in.readVInt()]; for (int i = 0; i < shards.length; i++) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index e6a9d98eb17..8b26fd6c04f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.cluster.shards; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -31,6 +30,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; @@ -59,7 +59,7 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadAction<ClusterSearchShardsRequest, ClusterSearchShardsResponse> { @Override protected ClusterBlockException checkBlock(ClusterSearchShardsRequest request, ClusterState state) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request)); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request)); } @Override @@ -70,7 +70,7 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadAction<ClusterSearchShardsRequest, ClusterSearchShardsResponse> { @Override protected void masterOperation(final ClusterSearchShardsRequest request, final ClusterState state, final ActionListener<ClusterSearchShardsResponse> listener) { ClusterState clusterState = clusterService.state(); - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request); + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request); Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(state, request.routing(), request.indices()); Set<String> nodeIds = new HashSet<>(); GroupShardsIterator groupShardsIterator = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference()); diff --git 
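The resolver rename applied throughout this diff splits two call styles: concreteIndexNames for callers that only need the string names, and concreteIndices for callers that need the full handle (name plus UUID). A sketch of that split under assumed, much-simplified types:

```java
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative only: Index here is a local stand-in, not
// org.elasticsearch.index.Index, and the "resolver" is a plain map.
final class ResolverDemo {
    static final class Index {
        final String name;
        final String uuid;
        Index(String name, String uuid) { this.name = name; this.uuid = uuid; }
        @Override public String toString() { return "[" + name + "/" + uuid + "]"; }
    }

    static final Map<String, Index> resolved = new LinkedHashMap<>();

    static String[] concreteIndexNames() {              // names only
        return resolved.keySet().toArray(new String[0]);
    }

    static Index[] concreteIndices() {                   // name + uuid handles
        return resolved.values().toArray(new Index[0]);
    }

    public static void main(String[] args) {
        resolved.put("kimchy", new Index("kimchy", "aBcD1234"));
        System.out.println(String.join(",", concreteIndexNames()));
        System.out.println(concreteIndices()[0]);
    }
}
```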
a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index 457b6e69383..2654ac0c269 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -22,12 +22,12 @@ package org.elasticsearch.action.admin.cluster.snapshots.create; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.SnapshotId; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.snapshots.SnapshotInfo; @@ -66,7 +66,7 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeAction 0) { - String[] indices = indexNameExpressionResolver.concreteIndices(currentState, request); + String[] indices = indexNameExpressionResolver.concreteIndexNames(currentState, request); for (String filteredIndex : indices) { IndexMetaData indexMetaData = currentState.metaData().index(filteredIndex); if (indexMetaData != null) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java index ec7017160c0..f8304bf76a9 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java @@ -31,7 +31,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString; import org.elasticsearch.index.cache.query.QueryCacheStats; import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.fielddata.FieldDataStats; -import org.elasticsearch.index.percolator.PercolateStats; +import org.elasticsearch.index.percolator.PercolatorQueryCacheStats; import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.index.store.StoreStats; import org.elasticsearch.search.suggest.completion.CompletionStats; @@ -48,7 +48,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable { private QueryCacheStats queryCache; private CompletionStats completion; private SegmentsStats segments; - private PercolateStats percolate; + private PercolatorQueryCacheStats percolatorCache; private ClusterStatsIndices() { } @@ -62,7 +62,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable { this.queryCache = new QueryCacheStats(); this.completion = new CompletionStats(); this.segments = new SegmentsStats(); - this.percolate = new PercolateStats(); + this.percolatorCache = new PercolatorQueryCacheStats(); for (ClusterStatsNodeResponse r : nodeResponses) { for (org.elasticsearch.action.admin.indices.stats.ShardStats shardStats : r.shardsStats()) { @@ -85,7 +85,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable { 
queryCache.add(shardCommonStats.queryCache); completion.add(shardCommonStats.completion); segments.add(shardCommonStats.segments); - percolate.add(shardCommonStats.percolate); + percolatorCache.add(shardCommonStats.percolatorCache); } } @@ -128,8 +128,8 @@ public class ClusterStatsIndices implements ToXContent, Streamable { return segments; } - public PercolateStats getPercolate() { - return percolate; + public PercolatorQueryCacheStats getPercolatorCache() { + return percolatorCache; } @Override @@ -142,7 +142,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable { queryCache = QueryCacheStats.readQueryCacheStats(in); completion = CompletionStats.readCompletionStats(in); segments = SegmentsStats.readSegmentsStats(in); - percolate = PercolateStats.readPercolateStats(in); + percolatorCache = PercolatorQueryCacheStats.readPercolateStats(in); } @Override @@ -155,7 +155,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable { queryCache.writeTo(out); completion.writeTo(out); segments.writeTo(out); - percolate.writeTo(out); + percolatorCache.writeTo(out); } public static ClusterStatsIndices readIndicesStats(StreamInput in) throws IOException { @@ -178,7 +178,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable { queryCache.toXContent(builder, params); completion.toXContent(builder, params); segments.toXContent(builder, params); - percolate.toXContent(builder, params); + percolatorCache.toXContent(builder, params); return builder; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java index 5604616ed39..6020aa1a104 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java @@ -302,7 +302,6 @@ public class ClusterStatsNodes implements ToXContent, Streamable { int availableProcessors; int allocatedProcessors; - long availableMemory; final ObjectIntHashMap names; public OsStats() { @@ -326,15 +325,10 @@ public class ClusterStatsNodes implements ToXContent, Streamable { return allocatedProcessors; } - public ByteSizeValue getAvailableMemory() { - return new ByteSizeValue(availableMemory); - } - @Override public void readFrom(StreamInput in) throws IOException { availableProcessors = in.readVInt(); allocatedProcessors = in.readVInt(); - availableMemory = in.readLong(); int size = in.readVInt(); names.clear(); for (int i = 0; i < size; i++) { @@ -346,7 +340,6 @@ public class ClusterStatsNodes implements ToXContent, Streamable { public void writeTo(StreamOutput out) throws IOException { out.writeVInt(availableProcessors); out.writeVInt(allocatedProcessors); - out.writeLong(availableMemory); out.writeVInt(names.size()); for (ObjectIntCursor name : names) { out.writeString(name.key); @@ -365,9 +358,6 @@ public class ClusterStatsNodes implements ToXContent, Streamable { static final XContentBuilderString ALLOCATED_PROCESSORS = new XContentBuilderString("allocated_processors"); static final XContentBuilderString NAME = new XContentBuilderString("name"); static final XContentBuilderString NAMES = new XContentBuilderString("names"); - static final XContentBuilderString MEM = new XContentBuilderString("mem"); - static final XContentBuilderString TOTAL = new XContentBuilderString("total"); - static final XContentBuilderString TOTAL_IN_BYTES = new XContentBuilderString("total_in_bytes"); 
static final XContentBuilderString COUNT = new XContentBuilderString("count"); } @@ -375,10 +365,6 @@ public class ClusterStatsNodes implements ToXContent, Streamable { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(Fields.AVAILABLE_PROCESSORS, availableProcessors); builder.field(Fields.ALLOCATED_PROCESSORS, allocatedProcessors); - builder.startObject(Fields.MEM); - builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, availableMemory); - builder.endObject(); - builder.startArray(Fields.NAMES); for (ObjectIntCursor name : names) { builder.startObject(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 4087fe6cfce..6d1614eb485 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -28,10 +28,10 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.BaseNodeRequest; import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.health.ClusterStateHealth; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -56,7 +56,7 @@ public class TransportClusterStatsAction extends TransportNodesAction shardsStats = new ArrayList<>(); for (IndexService indexService : indicesService) { for (IndexShard indexShard : indexService) { if (indexShard.routingEntry() != null && indexShard.routingEntry().active()) { // only report on fully started shards - shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, SHARD_STATS_FLAGS), indexShard.commitStats())); + shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexService.cache().getPercolatorQueryCache(), indexShard, SHARD_STATS_FLAGS), indexShard.commitStats())); } } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java index 105d596bad8..370b668f659 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java @@ -22,11 +22,11 @@ package org.elasticsearch.action.admin.cluster.tasks; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import 
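The OsStats change above is a reminder of the Streamable invariant: a removed field must disappear from writeTo, readFrom, and the XContent output together, or reader and writer fall out of step on the wire. A stand-alone sketch with java.io stand-ins for the ES stream classes:

```java
import java.io.*;

// Fields are serialized positionally, so dropping `availableMemory` means
// deleting it from BOTH directions of the stream at once, exactly as the
// diff above does for OsStats.
final class VersionedStatsDemo {
    int availableProcessors;
    int allocatedProcessors;
    // long availableMemory;  // removed: deleted from write AND read below

    void writeTo(DataOutputStream out) throws IOException {
        out.writeInt(availableProcessors);
        out.writeInt(allocatedProcessors);
        // out.writeLong(availableMemory);  // dropped together with the field
    }

    void readFrom(DataInputStream in) throws IOException {
        availableProcessors = in.readInt();
        allocatedProcessors = in.readInt();
        // availableMemory = in.readLong(); // dropped together with the field
    }

    public static void main(String[] args) throws IOException {
        VersionedStatsDemo stats = new VersionedStatsDemo();
        stats.availableProcessors = 8;
        stats.allocatedProcessors = 4;
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        stats.writeTo(new DataOutputStream(bytes));
        VersionedStatsDemo copy = new VersionedStatsDemo();
        copy.readFrom(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy.availableProcessors + "/" + copy.allocatedProcessors);
    }
}
```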
org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.PendingClusterTask; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java index d37053e056b..218b84e68ae 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -31,6 +30,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.AliasAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException; @@ -90,11 +90,11 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeAction aliases = new HashSet<>(); for (AliasActions action : actions) { //expand indices - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request.indicesOptions(), action.indices()); + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request.indicesOptions(), action.indices()); //collect the aliases Collections.addAll(aliases, action.aliases()); for (String index : concreteIndices) { - for (String alias : action.concreteAliases(state.metaData(), index)) { + for (String alias : action.concreteAliases(state.metaData(), index)) { AliasAction finalAction = new AliasAction(action.aliasAction()); finalAction.index(index); finalAction.alias(alias); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java index 5f92587f138..8ca09dbb67e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java @@ -22,11 +22,11 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import 
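The alias hunk above expands each alias action into one (index, alias) pair per matching concrete index, so a single API call fans out into many cluster-state updates. A small sketch of that fan-out with plain collections and illustrative names:

```java
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

// Each concrete index crossed with each concrete alias yields one final
// action, mirroring the nested loops in TransportIndicesAliasesAction.
final class AliasFanOutDemo {
    public static void main(String[] args) {
        List<String> concreteIndices = Arrays.asList("logs-2016-01", "logs-2016-02");
        List<String> aliases = Arrays.asList("logs", "recent");
        Set<String> finalActions = new LinkedHashSet<>();
        for (String index : concreteIndices) {
            for (String alias : aliases) {
                finalActions.add(index + " -> " + alias);   // one AliasAction each
            }
        }
        finalActions.forEach(System.out::println);          // four pairs
    }
}
```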
org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; @@ -50,7 +50,7 @@ public class TransportAliasesExistAction extends TransportMasterNodeReadAction listener) { - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request); boolean result = state.metaData().hasAliases(request.aliases(), concreteIndices); listener.onResponse(new AliasesExistResponse(result)); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index 9c2c2f03b57..061f916c2e0 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -21,12 +21,12 @@ package org.elasticsearch.action.admin.indices.alias.get; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -53,7 +53,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadAction listener) { - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request); @SuppressWarnings("unchecked") ImmutableOpenMap> result = (ImmutableOpenMap) state.metaData().findAliases(request.aliases(), concreteIndices); listener.onResponse(new GetAliasesResponse(result)); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java index 0541ac31505..0edae5eb1bc 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -32,11 +32,11 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import 
org.elasticsearch.common.io.FastStringReader; import org.elasticsearch.common.settings.Settings; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java index bc229d72b1b..59cd95044cc 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java @@ -22,13 +22,13 @@ package org.elasticsearch.action.admin.indices.cache.clear; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; @@ -77,7 +77,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc @Override protected EmptyResult shardOperation(ClearIndicesCacheRequest request, ShardRouting shardRouting) { - IndexService service = indicesService.indexService(shardRouting.getIndexName()); + IndexService service = indicesService.indexService(shardRouting.index()); if (service != null) { IndexShard shard = service.getShardOrNull(shardRouting.id()); boolean clearedAtLeastOne = false; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index f8bbebf7db8..4fbfc7e72ab 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -23,17 +23,19 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; 
import org.elasticsearch.transport.TransportService; @@ -46,7 +48,8 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIndexRequest, CloseIndexResponse> { - public static final Setting<Boolean> CLUSTER_INDICES_CLOSE_ENABLE_SETTING = Setting.boolSetting("cluster.indices.close.enable", true, true, Setting.Scope.CLUSTER); + public static final Setting<Boolean> CLUSTER_INDICES_CLOSE_ENABLE_SETTING = + Setting.boolSetting("cluster.indices.close.enable", true, Property.Dynamic, Property.NodeScope); @Inject public TransportCloseIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, @@ -86,12 +89,12 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIndexRequest, CloseIndexResponse> protected void masterOperation(final CloseIndexRequest request, final ClusterState state, final ActionListener<CloseIndexResponse> listener) { - final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); CloseIndexClusterStateUpdateRequest updateRequest = new CloseIndexClusterStateUpdateRequest() .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) .indices(concreteIndices); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index 98a002cc2fb..7b47a46a236 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -22,13 +22,13 @@ package org.elasticsearch.action.admin.indices.create; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.indices.IndexAlreadyExistsException; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java index 28bf46f798f..489001d9b89 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -23,18 +23,23 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService; +import org.elasticsearch.cluster.service.ClusterService; import 
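The setting migration shown here replaces the old positional booleans (dynamic flag plus Scope enum) with explicit Property values. A hedged sketch of declaring and reading such a setting, assuming the 5.x-era Setting/Settings API this diff targets:

```java
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

public class PropertySettingDemo {
    // Same shape as the migrated setting above: Property.Dynamic marks it
    // updatable at runtime, Property.NodeScope binds it to node settings.
    public static final Setting<Boolean> DEMO_SETTING =
        Setting.boolSetting("demo.feature.enable", true, Property.Dynamic, Property.NodeScope);

    public static void main(String[] args) {
        Settings settings = Settings.builder().put("demo.feature.enable", false).build();
        System.out.println(DEMO_SETTING.get(settings)); // prints: false
    }
}
```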
org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + /** * Delete index action. */ @@ -70,13 +75,13 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction listener) { - final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); - if (concreteIndices.length == 0) { + final Set concreteIndices = new HashSet<>(Arrays.asList(indexNameExpressionResolver.concreteIndices(state, request))); + if (concreteIndices.isEmpty()) { listener.onResponse(new DeleteIndexResponse(true)); return; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java index acda370d7ff..c451e50b77c 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java @@ -23,11 +23,11 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; @@ -60,7 +60,7 @@ public class TransportIndicesExistsAction extends TransportMasterNodeReadAction< protected ClusterBlockException checkBlock(IndicesExistsRequest request, ClusterState state) { //make sure through indices options that the concrete indices call never throws IndexMissingException IndicesOptions indicesOptions = IndicesOptions.fromOptions(true, true, request.indicesOptions().expandWildcardsOpen(), request.indicesOptions().expandWildcardsClosed()); - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, indicesOptions, request.indices())); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, indicesOptions, request.indices())); } @Override @@ -68,7 +68,7 @@ public class TransportIndicesExistsAction extends TransportMasterNodeReadAction< boolean exists; try { // Similar as the previous behaviour, but now also aliases and wildcards are supported. 
- indexNameExpressionResolver.concreteIndices(state, request); + indexNameExpressionResolver.concreteIndexNames(state, request); exists = true; } catch (IndexNotFoundException e) { exists = false; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java index 2fd92451752..e1cf5be1aca 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java @@ -21,12 +21,12 @@ package org.elasticsearch.action.admin.indices.exists.types; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -57,12 +57,12 @@ public class TransportTypesExistsAction extends TransportMasterNodeReadAction listener) { - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request.indicesOptions(), request.indices()); + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request.indicesOptions(), request.indices()); if (concreteIndices.length == 0) { listener.onResponse(new TypesExistsResponse(false)); return; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index d2a8f1abcbf..8bb124d8fc4 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -23,8 +23,8 @@ import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.ShardId; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index 302bdafc471..3c22209813f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -22,12 +22,11 @@ package org.elasticsearch.action.admin.indices.flush; import 
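The indices-exists handler above turns resolution failure into a boolean rather than performing a dedicated lookup, which is what lets it honor aliases and wildcards for free. A stand-alone sketch of that pattern, with IllegalArgumentException standing in for IndexNotFoundException:

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

// Resolve the expression through the same path a real request would take;
// if resolution throws, report "does not exist" instead of propagating.
final class ExistsByResolutionDemo {
    static final Set<String> indices = new HashSet<>(Arrays.asList("kimchy", "twitter"));

    static void resolve(String expression) {
        if (!indices.contains(expression)) {
            throw new IllegalArgumentException("no such index [" + expression + "]");
        }
    }

    static boolean exists(String expression) {
        try {
            resolve(expression);      // shared resolution path
            return true;
        } catch (IllegalArgumentException e) {
            return false;             // mirrors catching IndexNotFoundException
        }
    }

    public static void main(String[] args) {
        System.out.println(exists("kimchy"));   // true
        System.out.println(exists("missing"));  // false
    }
}
```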
org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.TransportReplicationAction; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -46,10 +45,9 @@ public class TransportShardFlushAction extends TransportReplicationAction listener) { ClusterState clusterState = clusterService.state(); - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request); + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request); final AtomicInteger indexCounter = new AtomicInteger(); final AtomicInteger completionCounter = new AtomicInteger(concreteIndices.length); final AtomicReferenceArray indexResponses = new AtomicReferenceArray<>(concreteIndices.length); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java index 3d11df97dee..e886af25fbb 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java @@ -23,12 +23,12 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java index bff9b4e2ab6..293f5a0e677 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java @@ -22,12 +22,12 @@ package org.elasticsearch.action.admin.indices.mapping.get; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.info.TransportClusterInfoAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import 
org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -52,7 +52,7 @@ public class TransportGetMappingsAction extends TransportClusterInfoAction<GetMappingsRequest, GetMappingsResponse> { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> implements IndicesRequest.Replaceable { private String source; private boolean updateAllTypes = false; + private Index concreteIndex; public PutMappingRequest() { } @@ -90,6 +94,10 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> implements IndicesRequest.Replaceable { } else if (source.isEmpty()) { validationException = addValidationError("mapping source is empty", validationException); } + if (concreteIndex != null && (indices != null && indices.length > 0)) { + validationException = addValidationError("either concrete index or unresolved indices can be set, concrete index: [" + + concreteIndex + "] and indices: " + Arrays.asList(indices), validationException); + } return validationException; } @@ -102,6 +110,22 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> implements IndicesRequest.Replaceable { return this; } + /** + * Sets a concrete index for this put mapping request. + */ + public PutMappingRequest setConcreteIndex(Index index) { + Objects.requireNonNull(index, "index must not be null"); + this.concreteIndex = index; + return this; + } + + /** + * Returns a concrete index for this mapping or null if no concrete index is defined. + */ + public Index getConcreteIndex() { + return concreteIndex; + } + /** * The indices the mappings will be put. */ @@ -259,6 +283,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> implements IndicesRequest.Replaceable { source = in.readString(); updateAllTypes = in.readBoolean(); readTimeout(in); + concreteIndex = in.readOptionalWriteable(Index::new); } @Override @@ -270,5 +295,6 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> implements IndicesRequest.Replaceable { out.writeString(source); out.writeBoolean(updateAllTypes); writeTimeout(out); + out.writeOptionalWriteable(concreteIndex); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java index 28f289b86c6..c21c40cf041 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; import java.util.Map; @@ -40,6 +41,11 @@ public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder<PutMappingRequest, PutMappingResponse, PutMappingRequestBuilder> { + /** + * Sets a concrete index for this put mapping request. + */ + public PutMappingRequestBuilder setConcreteIndex(Index index) { + request.setConcreteIndex(index); + return this; + } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index b82c5d3a626..46535350154 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java
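A hedged usage sketch of the new concrete-index hook, assuming only the request API in this diff; the Index constructor arguments (name, uuid) are illustrative:

```java
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.index.Index;

public class ConcreteIndexDemo {
    public static void main(String[] args) {
        // Internal callers can pin the exact index generation instead of
        // re-resolving a name that may point elsewhere by the time the
        // master applies the update. The uuid below is made up.
        PutMappingRequest request = new PutMappingRequest()
            .setConcreteIndex(new Index("kimchy", "aBcD1234"));
        System.out.println(request.getConcreteIndex());
        // Calling indices("...") as well would now fail validate(), since
        // the request rejects a concrete index combined with unresolved names.
    }
}
```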
org.elasticsearch.action.admin.indices.mapping.put; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataMappingService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -63,13 +64,19 @@ public class TransportPutMappingAction extends TransportMasterNodeAction listener) { try { - final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + final Index[] concreteIndices = request.getConcreteIndex() == null ? indexNameExpressionResolver.concreteIndices(state, request) : new Index[] {request.getConcreteIndex()}; PutMappingClusterStateUpdateRequest updateRequest = new PutMappingClusterStateUpdateRequest() .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) .indices(concreteIndices).type(request.type()) diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java index 7ffb30b9534..50e79036694 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java @@ -23,15 +23,16 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -73,12 +74,12 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction listener) { - final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); OpenIndexClusterStateUpdateRequest updateRequest = new OpenIndexClusterStateUpdateRequest() .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) .indices(concreteIndices); diff --git 
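
In `masterOperation`, resolution is skipped entirely when the request already names a concrete index. The branch reduces to the sketch below; `Resolver` is a hypothetical stand-in for `IndexNameExpressionResolver.concreteIndices(state, request)`:

```java
// Stand-ins for IndexNameExpressionResolver and Index; only the branch is real.
class TargetSelection {
    interface Resolver {
        String[] resolve(String... expressions); // expands wildcards and aliases
    }

    static String[] concreteIndices(String concreteIndex, String[] expressions, Resolver resolver) {
        return concreteIndex != null
                ? new String[] { concreteIndex } // trust the caller's resolution
                : resolver.resolve(expressions); // normal expression expansion
    }
}
```
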
a/core/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java index 8590fc210a0..01f37527374 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java @@ -22,13 +22,13 @@ package org.elasticsearch.action.admin.indices.recovery; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java index bd879e0eaa9..34bf39daabd 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java @@ -24,8 +24,8 @@ import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.BasicReplicationRequest; import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.ShardId; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java index 2dd41f7801d..e3155614337 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -23,12 +23,11 @@ import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.replication.BasicReplicationRequest; import org.elasticsearch.action.support.replication.TransportReplicationAction; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; +import 
org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -48,10 +47,9 @@ public class TransportShardRefreshAction extends TransportReplicationAction listener) { - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); ImmutableOpenMap.Builder indexToSettingsBuilder = ImmutableOpenMap.builder(); - for (String concreteIndex : concreteIndices) { + for (Index concreteIndex : concreteIndices) { IndexMetaData indexMetaData = state.getMetaData().index(concreteIndex); if (indexMetaData == null) { continue; @@ -93,7 +94,7 @@ public class TransportGetSettingsAction extends TransportMasterNodeReadAction listener) { - final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); UpdateSettingsClusterStateUpdateRequest clusterStateUpdateRequest = new UpdateSettingsClusterStateUpdateRequest() .indices(concreteIndices) .settings(request.settings()) + .setPreserveExisting(request.isPreserveExisting()) .ackTimeout(request.timeout()) .masterNodeTimeout(request.masterNodeTimeout()); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java index ac6f6f3d496..6c21768dbb7 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java @@ -29,8 +29,23 @@ public class UpdateSettingsClusterStateUpdateRequest extends IndicesClusterState private Settings settings; - public UpdateSettingsClusterStateUpdateRequest() { + private boolean preserveExisting = false; + /** + * Returns true iff the settings update should only add but not update settings. If the setting already exists + * it should not be overwritten by this update. The default is false + */ + public boolean isPreserveExisting() { + return preserveExisting; + } + + /** + * Iff set to true this settings update will only add settings not already set on an index. Existing settings remain + * unchanged. + */ + public UpdateSettingsClusterStateUpdateRequest setPreserveExisting(boolean preserveExisting) { + this.preserveExisting = preserveExisting; + return this; } /** diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index c654d6926fa..fb4525a6842 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -47,6 +47,7 @@ public class UpdateSettingsRequest extends AcknowledgedRequesttrue iff the settings update should only add but not update settings. If the setting already exists + * it should not be overwritten by this update. 
The default is false + */ + public boolean isPreserveExisting() { + return preserveExisting; + } + + /** + * Iff set to true this settings update will only add settings not already set on an index. Existing settings remain + * unchanged. + */ + public UpdateSettingsRequest setPreserveExisting(boolean preserveExisting) { + this.preserveExisting = preserveExisting; + return this; + } + /** * Sets the settings to be updated (either json/yaml/properties format) */ @@ -149,6 +167,7 @@ public class UpdateSettingsRequest extends AcknowledgedRequest listener) { final RoutingTable routingTables = state.routingTable(); final RoutingNodes routingNodes = state.getRoutingNodes(); - final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request); final Set shardIdsToFetch = new HashSet<>(); logger.trace("using cluster state version [{}] to determine shards", state.version()); @@ -115,7 +115,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc @Override protected ClusterBlockException checkBlock(IndicesShardStoresRequest request, ClusterState state) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request)); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request)); } private class AsyncShardStoresInfoFetches { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java index 676eec104b1..b130e6b378f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java @@ -32,16 +32,16 @@ import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.fielddata.FieldDataStats; import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.get.GetStats; +import org.elasticsearch.index.percolator.PercolatorQueryCache; import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.merge.MergeStats; -import org.elasticsearch.index.percolator.PercolateStats; +import org.elasticsearch.index.percolator.PercolatorQueryCacheStats; import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.search.stats.SearchStats; import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.store.StoreStats; -import org.elasticsearch.index.suggest.stats.SuggestStats; import org.elasticsearch.index.translog.TranslogStats; import org.elasticsearch.index.warmer.WarmerStats; import org.elasticsearch.indices.IndicesQueryCache; @@ -101,14 +101,14 @@ public class CommonStats implements Streamable, ToXContent { case Segments: segments = new SegmentsStats(); break; - case Percolate: - percolate = new PercolateStats(); + case PercolatorCache: + percolatorCache = new PercolatorQueryCacheStats(); break; case Translog: translog = new TranslogStats(); break; case Suggest: - suggest = new SuggestStats(); + // skip break; case RequestCache: requestCache = new RequestCacheStats(); @@ -123,7 +123,8 @@ public class CommonStats implements Streamable, ToXContent { } - public 
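
`preserveExisting` turns an update-settings call into an add-only operation: keys already present on the index win over incoming values. The actual merge happens later in `MetaDataUpdateSettingsService`; the sketch below, with plain maps and hypothetical names, shows only the intended semantics:

```java
import java.util.HashMap;
import java.util.Map;

// Illustrative only: the real merge lives in MetaDataUpdateSettingsService.
class PreserveExistingMerge {
    /**
     * Merges updates into existing settings. With preserveExisting = true,
     * a key already present on the index is left untouched (add-only).
     */
    static Map<String, String> apply(Map<String, String> existing,
                                     Map<String, String> updates,
                                     boolean preserveExisting) {
        Map<String, String> merged = new HashMap<>(existing);
        for (Map.Entry<String, String> e : updates.entrySet()) {
            if (preserveExisting && merged.containsKey(e.getKey())) {
                continue; // only add, never overwrite
            }
            merged.put(e.getKey(), e.getValue());
        }
        return merged;
    }
}
```

So with `preserveExisting = true`, an update that tries to change an already-set key is silently a no-op for that key, while new keys are still added.
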
CommonStats(IndicesQueryCache indicesQueryCache, IndexShard indexShard, CommonStatsFlags flags) { + public CommonStats(IndicesQueryCache indicesQueryCache, PercolatorQueryCache percolatorQueryCache, + IndexShard indexShard, CommonStatsFlags flags) { CommonStatsFlags.Flag[] setFlags = flags.getFlags(); @@ -168,14 +169,14 @@ public class CommonStats implements Streamable, ToXContent { case Segments: segments = indexShard.segmentStats(flags.includeSegmentFileSizes()); break; - case Percolate: - percolate = indexShard.percolateStats(); + case PercolatorCache: + percolatorCache = percolatorQueryCache.getStats(indexShard.shardId()); break; case Translog: translog = indexShard.translogStats(); break; case Suggest: - suggest = indexShard.suggestStats(); + // skip break; case RequestCache: requestCache = indexShard.requestCache().stats(); @@ -223,7 +224,7 @@ public class CommonStats implements Streamable, ToXContent { public FieldDataStats fieldData; @Nullable - public PercolateStats percolate; + public PercolatorQueryCacheStats percolatorCache; @Nullable public CompletionStats completion; @@ -234,9 +235,6 @@ public class CommonStats implements Streamable, ToXContent { @Nullable public TranslogStats translog; - @Nullable - public SuggestStats suggest; - @Nullable public RequestCacheStats requestCache; @@ -333,13 +331,13 @@ public class CommonStats implements Streamable, ToXContent { } else { fieldData.add(stats.getFieldData()); } - if (percolate == null) { - if (stats.getPercolate() != null) { - percolate = new PercolateStats(); - percolate.add(stats.getPercolate()); + if (percolatorCache == null) { + if (stats.getPercolatorCache() != null) { + percolatorCache = new PercolatorQueryCacheStats(); + percolatorCache.add(stats.getPercolatorCache()); } } else { - percolate.add(stats.getPercolate()); + percolatorCache.add(stats.getPercolatorCache()); } if (completion == null) { if (stats.getCompletion() != null) { @@ -365,14 +363,6 @@ public class CommonStats implements Streamable, ToXContent { } else { translog.add(stats.getTranslog()); } - if (suggest == null) { - if (stats.getSuggest() != null) { - suggest = new SuggestStats(); - suggest.add(stats.getSuggest()); - } - } else { - suggest.add(stats.getSuggest()); - } if (requestCache == null) { if (stats.getRequestCache() != null) { requestCache = new RequestCacheStats(); @@ -447,8 +437,8 @@ public class CommonStats implements Streamable, ToXContent { } @Nullable - public PercolateStats getPercolate() { - return percolate; + public PercolatorQueryCacheStats getPercolatorCache() { + return percolatorCache; } @Nullable @@ -466,11 +456,6 @@ public class CommonStats implements Streamable, ToXContent { return translog; } - @Nullable - public SuggestStats getSuggest() { - return suggest; - } - @Nullable public RequestCacheStats getRequestCache() { return requestCache; @@ -489,7 +474,7 @@ public class CommonStats implements Streamable, ToXContent { /** * Utility method which computes total memory by adding - * FieldData, Percolate, Segments (memory, index writer, version map) + * FieldData, PercolatorCache, Segments (memory, index writer, version map) */ public ByteSizeValue getTotalMemory() { long size = 0; @@ -499,9 +484,6 @@ public class CommonStats implements Streamable, ToXContent { if (this.getQueryCache() != null) { size += this.getQueryCache().getMemorySizeInBytes(); } - if (this.getPercolate() != null) { - size += this.getPercolate().getMemorySizeInBytes(); - } if (this.getSegments() != null) { size += this.getSegments().getMemoryInBytes() + 
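
The `add(...)` path in `CommonStats` repeats one null-tolerant merge per stat (create ours lazily, then accumulate theirs). A generic equivalent, not part of the PR and shown only to make the pattern explicit:

```java
import java.util.function.Supplier;

// Not part of the PR: a generic form of the null-tolerant merge that
// CommonStats#add repeats for every stat (percolatorCache, completion, ...).
class StatsMerge {
    interface Addable<T> {
        void add(T other);
    }

    /** Returns the merged stat, lazily creating ours on the first real merge. */
    static <T extends Addable<T>> T merge(T ours, T theirs, Supplier<T> factory) {
        if (theirs == null) {
            return ours; // nothing to merge in
        }
        if (ours == null) {
            ours = factory.get();
        }
        ours.add(theirs);
        return ours;
    }
}
```
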
this.getSegments().getIndexWriterMemoryInBytes() + @@ -547,7 +529,7 @@ public class CommonStats implements Streamable, ToXContent { fieldData = FieldDataStats.readFieldDataStats(in); } if (in.readBoolean()) { - percolate = PercolateStats.readPercolateStats(in); + percolatorCache = PercolatorQueryCacheStats.readPercolateStats(in); } if (in.readBoolean()) { completion = CompletionStats.readCompletionStats(in); @@ -556,7 +538,6 @@ public class CommonStats implements Streamable, ToXContent { segments = SegmentsStats.readSegmentsStats(in); } translog = in.readOptionalStreamable(TranslogStats::new); - suggest = in.readOptionalStreamable(SuggestStats::new); requestCache = in.readOptionalStreamable(RequestCacheStats::new); recoveryStats = in.readOptionalStreamable(RecoveryStats::new); } @@ -629,11 +610,11 @@ public class CommonStats implements Streamable, ToXContent { out.writeBoolean(true); fieldData.writeTo(out); } - if (percolate == null) { + if (percolatorCache == null) { out.writeBoolean(false); } else { out.writeBoolean(true); - percolate.writeTo(out); + percolatorCache.writeTo(out); } if (completion == null) { out.writeBoolean(false); @@ -648,7 +629,6 @@ public class CommonStats implements Streamable, ToXContent { segments.writeTo(out); } out.writeOptionalStreamable(translog); - out.writeOptionalStreamable(suggest); out.writeOptionalStreamable(requestCache); out.writeOptionalStreamable(recoveryStats); } @@ -689,8 +669,8 @@ public class CommonStats implements Streamable, ToXContent { if (fieldData != null) { fieldData.toXContent(builder, params); } - if (percolate != null) { - percolate.toXContent(builder, params); + if (percolatorCache != null) { + percolatorCache.toXContent(builder, params); } if (completion != null) { completion.toXContent(builder, params); @@ -701,9 +681,6 @@ public class CommonStats implements Streamable, ToXContent { if (translog != null) { translog.toXContent(builder, params); } - if (suggest != null) { - suggest.toXContent(builder, params); - } if (requestCache != null) { requestCache.toXContent(builder, params); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java index 39608c72a15..e2f250dd577 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java @@ -190,7 +190,7 @@ public class CommonStatsFlags implements Streamable, Cloneable { out.writeStringArrayNullable(groups); out.writeStringArrayNullable(fieldDataFields); out.writeStringArrayNullable(completionDataFields); - if (out.getVersion().onOrAfter(Version.V_5_0_0)) { + if (out.getVersion().onOrAfter(Version.V_5_0_0_alpha1)) { out.writeBoolean(includeSegmentFileSizes); } } @@ -208,7 +208,7 @@ public class CommonStatsFlags implements Streamable, Cloneable { groups = in.readStringArray(); fieldDataFields = in.readStringArray(); completionDataFields = in.readStringArray(); - if (in.getVersion().onOrAfter(Version.V_5_0_0)) { + if (in.getVersion().onOrAfter(Version.V_5_0_0_alpha1)) { includeSegmentFileSizes = in.readBoolean(); } else { includeSegmentFileSizes = false; @@ -240,11 +240,11 @@ public class CommonStatsFlags implements Streamable, Cloneable { FieldData("fielddata"), Docs("docs"), Warmer("warmer"), - Percolate("percolate"), + PercolatorCache("percolator_cache"), Completion("completion"), Segments("segments"), Translog("translog"), - 
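
The `V_5_0_0` to `V_5_0_0_alpha1` corrections matter because stream fields are version-gated: a field is written only when the peer's version is at least the version that introduced it, and readers substitute a default otherwise. A self-contained sketch of the idiom, where plain `DataOutput`/`DataInput` and an invented version id stand in for `StreamOutput`/`StreamInput` and `org.elasticsearch.Version`:

```java
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

// DataInput/DataOutput and the version id below stand in for
// StreamInput/StreamOutput and org.elasticsearch.Version.
class VersionGatedField {
    static final int V_5_0_0_ALPHA1 = 5000001; // hypothetical internal id

    static void write(DataOutput out, int peerVersion, boolean includeSegmentFileSizes) throws IOException {
        if (peerVersion >= V_5_0_0_ALPHA1) {
            out.writeBoolean(includeSegmentFileSizes); // older peers never see the field
        }
    }

    static boolean read(DataInput in, int peerVersion) throws IOException {
        if (peerVersion >= V_5_0_0_ALPHA1) {
            return in.readBoolean();
        }
        return false; // default when the sender is an older node
    }
}
```

Gating on the wrong version constant would make a 5.0.0-alpha1 node write a field that its peer does not read (or vice versa), desynchronizing the stream.
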
Suggest("suggest"), + Suggest("suggest"), // unused RequestCache("request_cache"), Recovery("recovery"); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java index 3a74d896fba..4a2d137593e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequest.java @@ -185,12 +185,12 @@ public class IndicesStatsRequest extends BroadcastRequest { } public IndicesStatsRequest percolate(boolean percolate) { - flags.set(Flag.Percolate, percolate); + flags.set(Flag.PercolatorCache, percolate); return this; } public boolean percolate() { - return flags.isSet(Flag.Percolate); + return flags.isSet(Flag.PercolatorCache); } public IndicesStatsRequest segments(boolean segments) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java index 0ae21a3ac09..cad919cbd18 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java @@ -152,11 +152,6 @@ public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder return this; } - public IndicesStatsRequestBuilder setSuggest(boolean suggest) { - request.suggest(suggest); - return this; - } - public IndicesStatsRequestBuilder setRequestCache(boolean requestCache) { request.requestCache(requestCache); return this; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java index 2189973d9b7..8c12dfa9fda 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -22,13 +22,13 @@ package org.elasticsearch.action.admin.indices.stats; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; @@ -140,7 +140,7 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction< flags.fieldDataFields(request.fieldDataFields()); } if (request.percolate()) { - flags.set(CommonStatsFlags.Flag.Percolate); + flags.set(CommonStatsFlags.Flag.PercolatorCache); } if (request.segments()) { flags.set(CommonStatsFlags.Flag.Segments); @@ -163,6 +163,6 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction< 
flags.set(CommonStatsFlags.Flag.Recovery); } - return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, flags), indexShard.commitStats()); + return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexService.cache().getPercolatorQueryCache(), indexShard, flags), indexShard.commitStats()); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java index 9eab0f80e50..0763f232711 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java @@ -21,12 +21,12 @@ package org.elasticsearch.action.admin.indices.template.delete; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java index a43397e48dc..672ca1a9080 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java @@ -22,12 +22,12 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index c5fed57d013..02aad2f7ff4 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ 
b/core/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -21,13 +21,13 @@ package org.elasticsearch.action.admin.indices.template.put; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java index 6b37f56ed4a..cf288e0cc6f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java @@ -23,13 +23,13 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java index f3cf2da9fdd..cdf6f585e53 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java @@ -25,7 +25,6 @@ import org.elasticsearch.action.PrimaryMissingActionException; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -35,6 +34,7 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import 
org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java index 527adeaa3e5..403456cb903 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java @@ -22,13 +22,13 @@ package org.elasticsearch.action.admin.indices.upgrade.post; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index fe02a1541a0..320f0696605 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -29,13 +29,13 @@ import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; import org.elasticsearch.action.support.broadcast.TransportBroadcastAction; import org.elasticsearch.cache.recycler.PageCacheRecycler; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.inject.Inject; diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 707bf8de57f..9d9b36ba072 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -37,16 +37,17 @@ import org.elasticsearch.action.support.AutoCreateIndex; import 
org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.update.TransportUpdateAction; import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndexAlreadyExistsException; @@ -245,17 +246,18 @@ public class TransportBulkAction extends HandledTransportAction list = requestsByShard.get(shardId); if (list == null) { @@ -304,7 +306,7 @@ public class TransportBulkAction extends HandledTransportAction list = requestsByShard.get(shardId); if (list == null) { @@ -314,7 +316,7 @@ public class TransportBulkAction extends HandledTransportAction list = requestsByShard.get(shardId); if (list == null) { @@ -356,18 +358,19 @@ public class TransportBulkAction extends HandledTransportAction responses, int idx, final ConcreteIndices concreteIndices, final MetaData metaData) { - String concreteIndex = concreteIndices.getConcreteIndex(request.index()); + Index concreteIndex = concreteIndices.getConcreteIndex(request.index()); Exception unavailableException = null; if (concreteIndex == null) { try { @@ -397,9 +400,9 @@ public class TransportBulkAction extends HandledTransportAction indices = new HashMap<>(); + private final Map indices = new HashMap<>(); ConcreteIndices(ClusterState state, IndexNameExpressionResolver indexNameExpressionResolver) { this.state = state; this.indexNameExpressionResolver = indexNameExpressionResolver; } - String getConcreteIndex(String indexOrAlias) { + Index getConcreteIndex(String indexOrAlias) { return indices.get(indexOrAlias); } - String resolveIfAbsent(DocumentRequest request) { - String concreteIndex = indices.get(request.index()); + Index resolveIfAbsent(DocumentRequest request) { + Index concreteIndex = indices.get(request.index()); if (concreteIndex == null) { concreteIndex = indexNameExpressionResolver.concreteSingleIndex(state, request); indices.put(request.index(), concreteIndex); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 30f6b03a116..76402df8aa4 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -35,12 +35,12 @@ import org.elasticsearch.action.support.replication.TransportReplicationAction; import org.elasticsearch.action.update.UpdateHelper; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import 
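
`ConcreteIndices` in `TransportBulkAction` now maps each alias or index name to an `Index` object, resolving against the cluster state at most once per distinct name in the bulk request. The caching reduces to the sketch below, where a hypothetical `String -> String` resolver stands in for `IndexNameExpressionResolver#concreteSingleIndex(state, request)`:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

// The resolver function is a stand-in for
// IndexNameExpressionResolver#concreteSingleIndex(state, request).
class ResolutionCache {
    private final Map<String, String> resolved = new HashMap<>();
    private final Function<String, String> resolver;

    ResolutionCache(Function<String, String> resolver) {
        this.resolver = resolver;
    }

    /** Resolves each distinct index-or-alias at most once per bulk request. */
    String resolveIfAbsent(String indexOrAlias) {
        return resolved.computeIfAbsent(indexOrAlias, resolver);
    }
}
```
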
org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; @@ -74,17 +74,19 @@ public class TransportShardBulkAction extends TransportReplicationAction shardOperationOnPrimary(MetaData metaData, BulkShardRequest request) { - final IndexService indexService = indicesService.indexServiceSafe(request.index()); - final IndexShard indexShard = indexService.getShard(request.shardId().id()); + ShardId shardId = request.shardId(); + final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + final IndexShard indexShard = indexService.getShard(shardId.getId()); long[] preVersions = new long[request.items().length]; VersionType[] preVersionTypes = new VersionType[request.items().length]; diff --git a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index 3ded0ed8e83..783fab08bae 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -28,13 +28,12 @@ import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.replication.TransportReplicationAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -60,10 +59,10 @@ public class TransportDeleteAction extends TransportReplicationAction> implements Streamable, */ protected abstract T valueOf(String value, String optionalFormat); + /** + * @param value + * The value to be converted to a String + * @param optionalFormat + * A string describing how to print the specified value. Whether + * this parameter is supported depends on the implementation. If + * optionalFormat is specified and the implementation doesn't + * support it an {@link UnsupportedOperationException} is thrown + */ + public abstract String stringValueOf(Object value, String optionalFormat); + /** * Merges the provided stats into this stats instance. 
*/ @@ -274,6 +287,18 @@ public abstract class FieldStats> implements Streamable, return java.lang.Long.valueOf(value); } + @Override + public String stringValueOf(Object value, String optionalFormat) { + if (optionalFormat != null) { + throw new UnsupportedOperationException("custom format isn't supported"); + } + if (value instanceof Number) { + return java.lang.Long.toString(((Number) value).longValue()); + } else { + throw new IllegalArgumentException("value must be a Long: " + value); + } + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -327,6 +352,18 @@ public abstract class FieldStats> implements Streamable, return java.lang.Float.valueOf(value); } + @Override + public String stringValueOf(Object value, String optionalFormat) { + if (optionalFormat != null) { + throw new UnsupportedOperationException("custom format isn't supported"); + } + if (value instanceof Number) { + return java.lang.Float.toString(((Number) value).floatValue()); + } else { + throw new IllegalArgumentException("value must be a Float: " + value); + } + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -380,6 +417,18 @@ public abstract class FieldStats> implements Streamable, return java.lang.Double.valueOf(value); } + @Override + public String stringValueOf(Object value, String optionalFormat) { + if (optionalFormat != null) { + throw new UnsupportedOperationException("custom format isn't supported"); + } + if (value instanceof Number) { + return java.lang.Double.toString(((Number) value).doubleValue()); + } else { + throw new IllegalArgumentException("value must be a Double: " + value); + } + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -437,6 +486,18 @@ public abstract class FieldStats> implements Streamable, return new BytesRef(value); } + @Override + public String stringValueOf(Object value, String optionalFormat) { + if (optionalFormat != null) { + throw new UnsupportedOperationException("custom format isn't supported"); + } + if (value instanceof BytesRef) { + return ((BytesRef) value).utf8ToString(); + } else { + throw new IllegalArgumentException("value must be a BytesRef: " + value); + } + } + @Override protected void toInnerXContent(XContentBuilder builder) throws IOException { builder.field(Fields.MIN_VALUE, getMinValueAsString()); @@ -490,6 +551,25 @@ public abstract class FieldStats> implements Streamable, return dateFormatter.parser().parseMillis(value); } + @Override + public String stringValueOf(Object value, String optionalFormat) { + FormatDateTimeFormatter dateFormatter = this.dateFormatter; + if (optionalFormat != null) { + dateFormatter = Joda.forPattern(optionalFormat); + } + long millis; + if (value instanceof java.lang.Long) { + millis = ((java.lang.Long) value).longValue(); + } else if (value instanceof DateTime) { + millis = ((DateTime) value).getMillis(); + } else if (value instanceof BytesRef) { + millis = dateFormatter.parser().parseMillis(((BytesRef) value).utf8ToString()); + } else { + throw new IllegalArgumentException("value must be either a DateTime or a long: " + value); + } + return dateFormatter.printer().print(millis); + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -504,6 +584,28 @@ public abstract class FieldStats> implements Streamable, } + public static class Ip extends Long { + + public Ip(int maxDoc, int docCount, long sumDocFreq, long sumTotalTermFreq, long minValue, long 
maxValue) { + super(maxDoc, docCount, sumDocFreq, sumTotalTermFreq, minValue, maxValue); + } + + protected Ip(int type, long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, long minValue, long maxValue) { + super(type, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, minValue, maxValue); + } + + public Ip() { + } + + @Override + public String stringValueOf(Object value, String optionalFormat) { + if (value instanceof BytesRef) { + return super.stringValueOf(IpFieldMapper.ipToLong(((BytesRef) value).utf8ToString()), optionalFormat); + } + return super.stringValueOf(value, optionalFormat); + } + } + public static FieldStats read(StreamInput in) throws IOException { FieldStats stats; byte type = in.readByte(); diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java b/core/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java index 42360c5e0eb..de56a0f5c2e 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java +++ b/core/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java @@ -28,13 +28,13 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; import org.elasticsearch.action.support.broadcast.TransportBroadcastAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; diff --git a/core/src/main/java/org/elasticsearch/action/get/TransportGetAction.java b/core/src/main/java/org/elasticsearch/action/get/TransportGetAction.java index 2d6bafc9623..b84493c4dca 100644 --- a/core/src/main/java/org/elasticsearch/action/get/TransportGetAction.java +++ b/core/src/main/java/org/elasticsearch/action/get/TransportGetAction.java @@ -22,12 +22,12 @@ package org.elasticsearch.action.get; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.Preference; import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; diff --git a/core/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java b/core/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java index 7f5de65c614..1858ac8ba71 100644 --- a/core/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java +++ 
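
Of the new `stringValueOf` implementations, only the date variant honors `optionalFormat`; the numeric and bytes stats throw `UnsupportedOperationException` for it. The date rendering amounts to formatting epoch millis with either the field's own pattern or the caller-supplied one, roughly as below (`java.time` is used purely as a stand-in for the Joda `FormatDateTimeFormatter` in the PR):

```java
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;

// java.time stands in for the Joda FormatDateTimeFormatter used in the PR.
class DateValueRendering {
    static String render(long millis, String optionalFormat) {
        String pattern = optionalFormat != null ? optionalFormat : "yyyy-MM-dd'T'HH:mm:ss.SSS";
        DateTimeFormatter formatter = DateTimeFormatter.ofPattern(pattern).withZone(ZoneOffset.UTC);
        return formatter.format(Instant.ofEpochMilli(millis));
    }
}
```
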
b/core/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java @@ -22,10 +22,10 @@ package org.elasticsearch.action.get; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -69,7 +69,7 @@ public class TransportMultiGetAction extends HandledTransportAction implements Do return this.versionType; } - private Version getVersion(MetaData metaData, String concreteIndex) { - // this can go away in 3.0 but is here now for easy backporting - since in 2.x we need the version on the timestamp stuff - final IndexMetaData indexMetaData = metaData.getIndices().get(concreteIndex); - if (indexMetaData == null) { - throw new IndexNotFoundException(concreteIndex); - } - return Version.indexCreated(indexMetaData.getSettings()); - } public void process(MetaData metaData, @Nullable MappingMetaData mappingMd, boolean allowIdGeneration, String concreteIndex) { // resolve the routing if needed @@ -600,8 +593,7 @@ public class IndexRequest extends ReplicationRequest implements Do // resolve timestamp if provided externally if (timestamp != null) { timestamp = MappingMetaData.Timestamp.parseStringTimestamp(timestamp, - mappingMd != null ? mappingMd.timestamp().dateTimeFormatter() : TimestampFieldMapper.Defaults.DATE_TIME_FORMATTER, - getVersion(metaData, concreteIndex)); + mappingMd != null ? 
mappingMd.timestamp().dateTimeFormatter() : TimestampFieldMapper.Defaults.DATE_TIME_FORMATTER); } if (mappingMd != null) { // might as well check for routing here @@ -645,7 +637,7 @@ public class IndexRequest extends ReplicationRequest implements Do // assigned again because mappingMd and // mappingMd#timestamp() are not null assert mappingMd != null; - timestamp = MappingMetaData.Timestamp.parseStringTimestamp(defaultTimestamp, mappingMd.timestamp().dateTimeFormatter(), getVersion(metaData, concreteIndex)); + timestamp = MappingMetaData.Timestamp.parseStringTimestamp(defaultTimestamp, mappingMd.timestamp().dateTimeFormatter()); } } } diff --git a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index fdd018c51f2..9be8e4cef89 100644 --- a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -28,7 +28,6 @@ import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.replication.TransportReplicationAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; @@ -36,6 +35,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -69,6 +69,7 @@ public class TransportIndexAction extends TransportReplicationAction shardOperationOnPrimary(MetaData metaData, IndexRequest request) throws Exception { // validate, if routing is required, that we got routing - IndexMetaData indexMetaData = metaData.index(request.shardId().getIndex()); + IndexMetaData indexMetaData = metaData.getIndexSafe(request.shardId().getIndex()); MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type()); if (mappingMd != null && mappingMd.routing().required()) { if (request.routing() == null) { @@ -205,8 +207,7 @@ public class TransportIndexAction extends TransportReplicationAction actionListener = bulkRequestModifier.wrapActionListenerIfNeeded(ingestTookInMillis, listener); if (bulkRequest.requests().isEmpty()) { diff --git a/core/src/main/java/org/elasticsearch/action/ingest/IngestProxyActionFilter.java b/core/src/main/java/org/elasticsearch/action/ingest/IngestProxyActionFilter.java index 39a4b1fa4e8..62716c6dc0d 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/IngestProxyActionFilter.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/IngestProxyActionFilter.java @@ -29,9 +29,9 @@ import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.ActionFilterChain; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; 
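
`metaData.getIndexSafe(...)` replaces a plain `index(...)` lookup so a missing index fails fast with a descriptive exception rather than surfacing later as a `NullPointerException`. The shape of such a helper, sketched with a plain map (the real method lives on `MetaData` and throws `IndexNotFoundException` rather than `IllegalArgumentException`):

```java
import java.util.Map;

// Sketch with a plain map; the real method is on MetaData and throws
// IndexNotFoundException rather than IllegalArgumentException.
class MetaDataLookup {
    static <V> V getIndexSafe(Map<String, V> indices, String index) {
        V meta = indices.get(index);
        if (meta == null) {
            throw new IllegalArgumentException("no such index [" + index + "]");
        }
        return meta;
    }
}
```
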
+import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; diff --git a/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java b/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java index aafd9ee75a4..e1a34413e2c 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java @@ -26,12 +26,12 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.ingest.PipelineStore; diff --git a/core/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java b/core/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java index 30efbe1b0fa..bc40a8368f0 100644 --- a/core/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java +++ b/core/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java @@ -23,13 +23,14 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.ingest.core.IngestDocument; import org.elasticsearch.ingest.core.Pipeline; -import org.elasticsearch.ingest.core.Processor; import org.elasticsearch.ingest.core.CompoundProcessor; import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; import java.util.List; +import static org.elasticsearch.ingest.processor.TrackingResultProcessor.decorate; + class SimulateExecutionService { private static final String THREAD_POOL_NAME = ThreadPool.Names.MANAGEMENT; @@ -40,40 +41,16 @@ class SimulateExecutionService { this.threadPool = threadPool; } - void executeVerboseDocument(Processor processor, IngestDocument ingestDocument, List processorResultList) throws Exception { - if (processor instanceof CompoundProcessor) { - CompoundProcessor cp = (CompoundProcessor) processor; - try { - for (Processor p : cp.getProcessors()) { - executeVerboseDocument(p, ingestDocument, processorResultList); - } - } catch (Exception e) { - for (Processor p : cp.getOnFailureProcessors()) { - executeVerboseDocument(p, ingestDocument, processorResultList); - } - } - } else { - try { - processor.execute(ingestDocument); - processorResultList.add(new SimulateProcessorResult(processor.getTag(), new IngestDocument(ingestDocument))); - } catch (Exception e) { - processorResultList.add(new SimulateProcessorResult(processor.getTag(), e)); - throw e; - } - } - } - SimulateDocumentResult executeDocument(Pipeline pipeline, IngestDocument ingestDocument, boolean verbose) { if (verbose) { List processorResultList = new ArrayList<>(); - IngestDocument currentIngestDocument = new 
IngestDocument(ingestDocument); - CompoundProcessor pipelineProcessor = new CompoundProcessor(pipeline.getProcessors(), pipeline.getOnFailureProcessors()); + CompoundProcessor verbosePipelineProcessor = decorate(pipeline.getCompoundProcessor(), processorResultList); try { - executeVerboseDocument(pipelineProcessor, currentIngestDocument, processorResultList); + verbosePipelineProcessor.execute(ingestDocument); + return new SimulateDocumentVerboseResult(processorResultList); } catch (Exception e) { - return new SimulateDocumentBaseResult(e); + return new SimulateDocumentVerboseResult(processorResultList); } - return new SimulateDocumentVerboseResult(processorResultList); } else { try { pipeline.execute(ingestDocument); diff --git a/core/src/main/java/org/elasticsearch/action/percolate/MultiPercolateRequest.java b/core/src/main/java/org/elasticsearch/action/percolate/MultiPercolateRequest.java index 74537379d1d..ac49fed763a 100644 --- a/core/src/main/java/org/elasticsearch/action/percolate/MultiPercolateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/percolate/MultiPercolateRequest.java @@ -163,11 +163,7 @@ public class MultiPercolateRequest extends ActionRequest @Override public List subRequests() { - List indicesRequests = new ArrayList<>(); - for (PercolateRequest percolateRequest : this.requests) { - indicesRequests.addAll(percolateRequest.subRequests()); - } - return indicesRequests; + return requests; } private void parsePercolateAction(XContentParser parser, PercolateRequest percolateRequest, boolean allowExplicitIndex) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequest.java b/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequest.java index e69da6bf519..c9887cba03f 100644 --- a/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequest.java @@ -19,10 +19,12 @@ package org.elasticsearch.action.percolate; import org.elasticsearch.ElasticsearchGenerationException; +import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.broadcast.BroadcastRequest; import org.elasticsearch.client.Requests; import org.elasticsearch.common.bytes.BytesArray; @@ -43,49 +45,37 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; /** * A request to execute a percolate operation. */ -public class PercolateRequest extends BroadcastRequest implements CompositeIndicesRequest { +public class PercolateRequest extends ActionRequest implements IndicesRequest.Replaceable { + protected String[] indices; + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed(); private String documentType; private String routing; private String preference; - private GetRequest getRequest; private boolean onlyCount; + private GetRequest getRequest; private BytesReference source; - private BytesReference docSource; - - // Used internally in order to compute tookInMillis, TransportBroadcastAction itself doesn't allow - // to hold it temporarily in an easy way - long startTime; - - /** - * Constructor only for internal usage. 
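The SimulateExecutionService rewrite above drops the hand-rolled recursive `executeVerboseDocument` walk: the pipeline's CompoundProcessor is instead decorated via `TrackingResultProcessor.decorate(...)`, so ordinary execution records one result per processor as a side effect, and a verbose result can be returned even when execution throws. A rough sketch of the decorator idea, under simplified stand-in types (the real Processor/CompoundProcessor interfaces live in org.elasticsearch.ingest.core and differ in detail):

```java
import java.util.ArrayList;
import java.util.List;

interface Processor {
    void execute(Document doc) throws Exception;
    String getTag();
}

class Document { /* stand-in for IngestDocument */ }

class Result {
    final String tag;
    final Exception failure; // null on success
    Result(String tag, Exception failure) { this.tag = tag; this.failure = failure; }
}

// Wraps a processor so every execution, successful or failed, is recorded.
class TrackingProcessor implements Processor {
    private final Processor delegate;
    private final List<Result> results;

    TrackingProcessor(Processor delegate, List<Result> results) {
        this.delegate = delegate;
        this.results = results;
    }

    @Override
    public void execute(Document doc) throws Exception {
        try {
            delegate.execute(doc);
            results.add(new Result(delegate.getTag(), null));
        } catch (Exception e) {
            results.add(new Result(delegate.getTag(), e));
            throw e; // let the enclosing compound processor run its on-failure chain
        }
    }

    @Override
    public String getTag() { return delegate.getTag(); }

    // The real decorate(...) rebuilds a CompoundProcessor with every child
    // (including on-failure processors) wrapped; this flat version conveys the idea.
    static List<Processor> decorate(List<Processor> processors, List<Result> results) {
        List<Processor> wrapped = new ArrayList<>(processors.size());
        for (Processor p : processors) {
            wrapped.add(new TrackingProcessor(p, results));
        }
        return wrapped;
    }

    public static void main(String[] args) throws Exception {
        List<Result> results = new ArrayList<>();
        Processor noop = new Processor() {
            @Override public void execute(Document doc) { /* no-op */ }
            @Override public String getTag() { return "noop"; }
        };
        for (Processor p : decorate(List.of(noop), results)) {
            p.execute(new Document());
        }
        System.out.println("recorded " + results.size() + " result(s)");
    }
}
```

Note how the catch branch both records the failure and rethrows: that is what lets the new code return a SimulateDocumentVerboseResult from its own catch clause with whatever results were collected up to the failing processor.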
- */ - public PercolateRequest() { + public String[] indices() { + return indices; } - PercolateRequest(PercolateRequest request, BytesReference docSource) { - this.indices = request.indices(); - this.documentType = request.documentType(); - this.routing = request.routing(); - this.preference = request.preference(); - this.source = request.source; - this.docSource = docSource; - this.onlyCount = request.onlyCount; - this.startTime = request.startTime; + public final PercolateRequest indices(String... indices) { + this.indices = indices; + return this; } - @Override - public List subRequests() { - List requests = new ArrayList<>(); - requests.add(this); - if (getRequest != null) { - requests.add(getRequest); - } - return requests; + public IndicesOptions indicesOptions() { + return indicesOptions; } + public PercolateRequest indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + return this; + } + + /** * Getter for {@link #documentType(String)} */ @@ -244,13 +234,9 @@ public class PercolateRequest extends BroadcastRequest impleme return this; } - BytesReference docSource() { - return docSource; - } - @Override public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = super.validate(); + ActionRequestValidationException validationException = null; if (documentType == null) { validationException = addValidationError("type is missing", validationException); } @@ -266,12 +252,12 @@ public class PercolateRequest extends BroadcastRequest impleme @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - startTime = in.readVLong(); + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); documentType = in.readString(); routing = in.readOptionalString(); preference = in.readOptionalString(); source = in.readBytesReference(); - docSource = in.readBytesReference(); if (in.readBoolean()) { getRequest = new GetRequest(); getRequest.readFrom(in); @@ -282,12 +268,12 @@ public class PercolateRequest extends BroadcastRequest impleme @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeVLong(startTime); + out.writeStringArrayNullable(indices); + indicesOptions.writeIndicesOptions(out); out.writeString(documentType); out.writeOptionalString(routing); out.writeOptionalString(preference); out.writeBytesReference(source); - out.writeBytesReference(docSource); if (getRequest != null) { out.writeBoolean(true); getRequest.writeTo(out); diff --git a/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java index 472938cfbf1..9286601da69 100644 --- a/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java @@ -18,8 +18,9 @@ */ package org.elasticsearch.action.percolate; +import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -36,7 +37,7 @@ import java.util.Map; /** * A builder to ease defining a percolate request.
*/ -public class PercolateRequestBuilder extends BroadcastOperationRequestBuilder { +public class PercolateRequestBuilder extends ActionRequestBuilder { private PercolateSourceBuilder sourceBuilder; @@ -44,6 +45,16 @@ public class PercolateRequestBuilder extends BroadcastOperationRequestBuilder sort) { sourceBuilder().addSort(sort); return this; } diff --git a/core/src/main/java/org/elasticsearch/action/percolate/PercolateResponse.java b/core/src/main/java/org/elasticsearch/action/percolate/PercolateResponse.java index 13c2526a7a3..9400cece8b6 100644 --- a/core/src/main/java/org/elasticsearch/action/percolate/PercolateResponse.java +++ b/core/src/main/java/org/elasticsearch/action/percolate/PercolateResponse.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; -import org.elasticsearch.percolator.PercolatorService; import org.elasticsearch.rest.action.support.RestActions; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.highlight.HighlightField; @@ -47,6 +46,8 @@ import java.util.Map; public class PercolateResponse extends BroadcastResponse implements Iterable, ToXContent { public static final Match[] EMPTY = new Match[0]; + // PercolatorQuery emits this score if no 'query' is defined in the percolate request + public final static float NO_SCORE = 0.0f; private long tookInMillis; private Match[] matches; @@ -65,15 +66,6 @@ public class PercolateResponse extends BroadcastResponse implements Iterable shardFailures, long tookInMillis, Match[] matches) { - super(totalShards, successfulShards, failedShards, shardFailures); - if (tookInMillis < 0) { - throw new IllegalArgumentException("tookInMillis must be positive but was: " + tookInMillis); - } - this.tookInMillis = tookInMillis; - this.matches = matches; - } - PercolateResponse() { } @@ -136,10 +128,10 @@ public class PercolateResponse extends BroadcastResponse implements Iterable ids; - private Map> hls; - private boolean onlyCount; - private int requestedSize; - - private InternalAggregations aggregations; - private List pipelineAggregators; - - PercolateShardResponse() { - } - - public PercolateShardResponse(TopDocs topDocs, Map ids, Map> hls, PercolateContext context) { - super(context.indexShard().shardId()); - this.topDocs = topDocs; - this.ids = ids; - this.hls = hls; - this.onlyCount = context.isOnlyCount(); - this.requestedSize = context.size(); - QuerySearchResult result = context.queryResult(); - if (result != null) { - if (result.aggregations() != null) { - this.aggregations = (InternalAggregations) result.aggregations(); - } - this.pipelineAggregators = result.pipelineAggregators(); - } - } - - public TopDocs topDocs() { - return topDocs; - } - - /** - * Returns per match the percolator query id. The key is the Lucene docId of the matching percolator query. - */ - public Map ids() { - return ids; - } - - public int requestedSize() { - return requestedSize; - } - - /** - * Returns per match the highlight snippets. The key is the Lucene docId of the matching percolator query. 
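The new `NO_SCORE` constant introduced above makes the unscored case explicit: per its comment, PercolatorQuery emits 0.0f when the percolate request defines no 'query'. A small, hypothetical consumer-side check built on that convention (real matches also expose index, id, and highlight fields):

```java
public class MatchScores {
    // Mirrors PercolateResponse.NO_SCORE: the score PercolatorQuery emits
    // when the percolate request defines no 'query' to rank matches.
    static final float NO_SCORE = 0.0f;

    public static void main(String[] args) {
        float[] matchScores = {NO_SCORE, 1.7f, 0.3f};
        for (float score : matchScores) {
            if (score == NO_SCORE) {
                System.out.println("unscored match (request had no query)");
            } else {
                System.out.println("scored match: " + score);
            }
        }
    }
}
```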
- */ - public Map> hls() { - return hls; - } - - public InternalAggregations aggregations() { - return aggregations; - } - - public List pipelineAggregators() { - return pipelineAggregators; - } - - public boolean onlyCount() { - return onlyCount; - } - - public boolean isEmpty() { - return topDocs.totalHits == 0; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - onlyCount = in.readBoolean(); - requestedSize = in.readVInt(); - topDocs = Lucene.readTopDocs(in); - int size = in.readVInt(); - ids = new HashMap<>(size); - for (int i = 0; i < size; i++) { - ids.put(in.readVInt(), in.readString()); - } - size = in.readVInt(); - hls = new HashMap<>(size); - for (int i = 0; i < size; i++) { - int docId = in.readVInt(); - int mSize = in.readVInt(); - Map fields = new HashMap<>(); - for (int j = 0; j < mSize; j++) { - fields.put(in.readString(), HighlightField.readHighlightField(in)); - } - hls.put(docId, fields); - } - aggregations = InternalAggregations.readOptionalAggregations(in); - if (in.readBoolean()) { - int pipelineAggregatorsSize = in.readVInt(); - List pipelineAggregators = new ArrayList<>(pipelineAggregatorsSize); - for (int i = 0; i < pipelineAggregatorsSize; i++) { - BytesReference type = in.readBytesReference(); - PipelineAggregator pipelineAggregator = PipelineAggregatorStreams.stream(type).readResult(in); - pipelineAggregators.add((SiblingPipelineAggregator) pipelineAggregator); - } - this.pipelineAggregators = pipelineAggregators; - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeBoolean(onlyCount); - out.writeVLong(requestedSize); - Lucene.writeTopDocs(out, topDocs); - out.writeVInt(ids.size()); - for (Map.Entry entry : ids.entrySet()) { - out.writeVInt(entry.getKey()); - out.writeString(entry.getValue()); - } - out.writeVInt(hls.size()); - for (Map.Entry> entry1 : hls.entrySet()) { - out.writeVInt(entry1.getKey()); - out.writeVInt(entry1.getValue().size()); - for (Map.Entry entry2 : entry1.getValue().entrySet()) { - out.writeString(entry2.getKey()); - entry2.getValue().writeTo(out); - } - } - out.writeOptionalStreamable(aggregations); - if (pipelineAggregators == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeVInt(pipelineAggregators.size()); - for (PipelineAggregator pipelineAggregator : pipelineAggregators) { - out.writeBytesReference(pipelineAggregator.type().stream()); - pipelineAggregator.writeTo(out); - } - } - } -} diff --git a/core/src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java b/core/src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java index d687d48fb0c..5a5924f7883 100644 --- a/core/src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java @@ -48,13 +48,13 @@ import java.util.Map; public class PercolateSourceBuilder extends ToXContentToBytes { private DocBuilder docBuilder; - private QueryBuilder queryBuilder; + private QueryBuilder queryBuilder; private Integer size; - private List sorts; + private List> sorts; private Boolean trackScores; private HighlightBuilder highlightBuilder; private List> aggregationBuilders; - private List pipelineAggregationBuilders; + private List> pipelineAggregationBuilders; /** * Sets the document to run the percolate queries against. 
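One detail worth flagging in the PercolateShardResponse removal above: its readFrom read requestedSize with `in.readVInt()` while writeTo wrote it with `out.writeVLong(requestedSize)`. Streamable-style serialization only works when reads and writes mirror each other exactly, in order and in width. A minimal sketch of the symmetric pattern, using plain java.io streams as stand-ins for StreamInput/StreamOutput:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Stand-in for the Streamable pattern: every field written in writeTo must be
// read back in readFrom in the same order and with the same width.
class ShardSummary {
    boolean onlyCount;
    int requestedSize;

    void writeTo(DataOutputStream out) throws IOException {
        out.writeBoolean(onlyCount);
        out.writeInt(requestedSize);   // symmetric with readInt below
    }

    void readFrom(DataInputStream in) throws IOException {
        onlyCount = in.readBoolean();
        requestedSize = in.readInt();  // symmetric with writeInt above
    }

    public static void main(String[] args) throws IOException {
        ShardSummary original = new ShardSummary();
        original.onlyCount = true;
        original.requestedSize = 42;

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.writeTo(new DataOutputStream(bytes));

        ShardSummary copy = new ShardSummary();
        copy.readFrom(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy.onlyCount + " / " + copy.requestedSize); // true / 42
    }
}
```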
@@ -68,7 +68,7 @@ public class PercolateSourceBuilder extends ToXContentToBytes { * Sets a query to reduce the number of percolate queries to be evaluated and score the queries that match based * on this query. */ - public PercolateSourceBuilder setQueryBuilder(QueryBuilder queryBuilder) { + public PercolateSourceBuilder setQueryBuilder(QueryBuilder queryBuilder) { this.queryBuilder = queryBuilder; return this; } @@ -98,7 +98,7 @@ public class PercolateSourceBuilder extends ToXContentToBytes { * * By default the matching percolator queries are returned in an undefined order. */ - public PercolateSourceBuilder addSort(SortBuilder sort) { + public PercolateSourceBuilder addSort(SortBuilder sort) { if (sorts == null) { sorts = new ArrayList<>(); } @@ -137,7 +137,7 @@ public class PercolateSourceBuilder extends ToXContentToBytes { /** * Add an aggregation definition. */ - public PercolateSourceBuilder addAggregation(PipelineAggregatorBuilder aggregationBuilder) { + public PercolateSourceBuilder addAggregation(PipelineAggregatorBuilder aggregationBuilder) { if (pipelineAggregationBuilders == null) { pipelineAggregationBuilders = new ArrayList<>(); } @@ -160,10 +160,8 @@ public class PercolateSourceBuilder extends ToXContentToBytes { } if (sorts != null) { builder.startArray("sort"); - for (SortBuilder sort : sorts) { - builder.startObject(); + for (SortBuilder sort : sorts) { sort.toXContent(builder, params); - builder.endObject(); } builder.endArray(); } @@ -182,7 +180,7 @@ public class PercolateSourceBuilder extends ToXContentToBytes { } } if (pipelineAggregationBuilders != null) { - for (PipelineAggregatorBuilder aggregation : pipelineAggregationBuilders) { + for (PipelineAggregatorBuilder aggregation : pipelineAggregationBuilders) { aggregation.toXContent(builder, params); } } diff --git a/core/src/main/java/org/elasticsearch/action/percolate/TransportMultiPercolateAction.java b/core/src/main/java/org/elasticsearch/action/percolate/TransportMultiPercolateAction.java index d86d91c654e..bf0d79d884e 100644 --- a/core/src/main/java/org/elasticsearch/action/percolate/TransportMultiPercolateAction.java +++ b/core/src/main/java/org/elasticsearch/action/percolate/TransportMultiPercolateAction.java @@ -19,114 +19,91 @@ package org.elasticsearch.action.percolate; -import com.carrotsearch.hppc.IntArrayList; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.get.MultiGetItemResponse; import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.get.MultiGetResponse; -import org.elasticsearch.action.get.TransportMultiGetAction; +import org.elasticsearch.action.search.MultiSearchRequest; +import org.elasticsearch.action.search.MultiSearchResponse; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import 
org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.index.engine.DocumentMissingException; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.percolator.PercolatorService; +import org.elasticsearch.indices.query.IndicesQueriesRegistry; +import org.elasticsearch.search.aggregations.AggregatorParsers; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReferenceArray; -/** - */ public class TransportMultiPercolateAction extends HandledTransportAction { - private final ClusterService clusterService; - private final PercolatorService percolatorService; - - private final TransportMultiGetAction multiGetAction; - private final TransportShardMultiPercolateAction shardMultiPercolateAction; + private final Client client; + private final ParseFieldMatcher parseFieldMatcher; + private final IndicesQueriesRegistry queryRegistry; + private final AggregatorParsers aggParsers; @Inject - public TransportMultiPercolateAction(Settings settings, ThreadPool threadPool, TransportShardMultiPercolateAction shardMultiPercolateAction, - ClusterService clusterService, TransportService transportService, PercolatorService percolatorService, - TransportMultiGetAction multiGetAction, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + public TransportMultiPercolateAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, Client client, IndicesQueriesRegistry queryRegistry, + AggregatorParsers aggParsers) { super(settings, MultiPercolateAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, MultiPercolateRequest::new); - this.shardMultiPercolateAction = shardMultiPercolateAction; - this.clusterService = clusterService; - this.percolatorService = percolatorService; - this.multiGetAction = multiGetAction; + this.client = client; + this.aggParsers = aggParsers; + this.parseFieldMatcher = new ParseFieldMatcher(settings); + this.queryRegistry = queryRegistry; } @Override - protected void doExecute(final MultiPercolateRequest request, final ActionListener listener) { - final ClusterState clusterState = clusterService.state(); - clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); - - final List percolateRequests = new ArrayList<>(request.requests().size()); - // Can have a mixture of percolate requests. 
(normal percolate requests & percolate existing doc), - // so we need to keep track for what percolate request we had a get request - final IntArrayList getRequestSlots = new IntArrayList(); - List existingDocsRequests = new ArrayList<>(); - for (int slot = 0; slot < request.requests().size(); slot++) { - PercolateRequest percolateRequest = request.requests().get(slot); - percolateRequest.startTime = System.currentTimeMillis(); - percolateRequests.add(percolateRequest); - if (percolateRequest.getRequest() != null) { - existingDocsRequests.add(percolateRequest.getRequest()); - getRequestSlots.add(slot); + protected void doExecute(MultiPercolateRequest request, ActionListener listener) { + List> getRequests = new ArrayList<>(); + for (int i = 0; i < request.requests().size(); i++) { + GetRequest getRequest = request.requests().get(i).getRequest(); + if (getRequest != null) { + getRequests.add(new Tuple<>(i, getRequest)); } } - - if (!existingDocsRequests.isEmpty()) { - final MultiGetRequest multiGetRequest = new MultiGetRequest(); - for (GetRequest getRequest : existingDocsRequests) { - multiGetRequest.add( - new MultiGetRequest.Item(getRequest.index(), getRequest.type(), getRequest.id()) - .routing(getRequest.routing()) - ); + if (getRequests.isEmpty()) { + innerDoExecute(request, listener, Collections.emptyMap(), new HashMap<>()); + } else { + MultiGetRequest multiGetRequest = new MultiGetRequest(); + for (Tuple tuple : getRequests) { + GetRequest getRequest = tuple.v2(); + multiGetRequest.add(new MultiGetRequest.Item(getRequest.index(), getRequest.type(), getRequest.id())); } - - multiGetAction.execute(multiGetRequest, new ActionListener() { - + client.multiGet(multiGetRequest, new ActionListener() { @Override - public void onResponse(MultiGetResponse multiGetItemResponses) { - for (int i = 0; i < multiGetItemResponses.getResponses().length; i++) { - MultiGetItemResponse itemResponse = multiGetItemResponses.getResponses()[i]; - int slot = getRequestSlots.get(i); - if (!itemResponse.isFailed()) { - GetResponse getResponse = itemResponse.getResponse(); - if (getResponse.isExists()) { - PercolateRequest originalRequest = (PercolateRequest) percolateRequests.get(slot); - percolateRequests.set(slot, new PercolateRequest(originalRequest, getResponse.getSourceAsBytesRef())); - } else { - logger.trace("mpercolate existing doc, item[{}] doesn't exist", slot); - percolateRequests.set(slot, new DocumentMissingException(null, getResponse.getType(), getResponse.getId())); - } + public void onResponse(MultiGetResponse response) { + Map getResponseSources = new HashMap<>(response.getResponses().length); + Map preFailures = new HashMap<>(); + for (int i = 0; i < response.getResponses().length; i++) { + MultiGetItemResponse itemResponse = response.getResponses()[i]; + int originalSlot = getRequests.get(i).v1(); + if (itemResponse.isFailed()) { + preFailures.put(originalSlot, new MultiPercolateResponse.Item(itemResponse.getFailure().getFailure())); } else { - logger.trace("mpercolate existing doc, item[{}] failure {}", slot, itemResponse.getFailure()); - percolateRequests.set(slot, itemResponse.getFailure()); + if (itemResponse.getResponse().isExists()) { + getResponseSources.put(originalSlot, itemResponse.getResponse().getSourceAsBytesRef()); + } else { + GetRequest getRequest = getRequests.get(i).v2(); + preFailures.put(originalSlot, new MultiPercolateResponse.Item(new ResourceNotFoundException("percolate document [{}/{}/{}] doesn't exist", getRequest.index(), getRequest.type(), getRequest.id()))); + } } 
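The rewritten doExecute above pairs each get-backed percolate item with its original position via `Tuple<Integer, GetRequest>`, so multi-get hits can be mapped back as per-slot document sources and misses as per-slot pre-failures. A distilled sketch of that bookkeeping, with plain strings standing in for the real request and response types (the real code uses org.elasticsearch.common.collect.Tuple):

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class SlotTracking {
    record Tuple<A, B>(A v1, B v2) {}   // stand-in for o.e.common.collect.Tuple

    public static void main(String[] args) {
        // Five sub-requests; only slots 1 and 3 reference an existing document.
        List<String> subRequests = List.of("inline", "get:doc-a", "inline", "get:doc-b", "inline");

        // Pair every get-style sub-request with its original slot.
        List<Tuple<Integer, String>> getRequests = new ArrayList<>();
        for (int i = 0; i < subRequests.size(); i++) {
            if (subRequests.get(i).startsWith("get:")) {
                getRequests.add(new Tuple<>(i, subRequests.get(i).substring(4)));
            }
        }

        // The multi-get response arrives in getRequests order; map each item
        // back to its original slot, recording misses as pre-failures.
        Map<Integer, String> sourcesBySlot = new HashMap<>();
        Map<Integer, String> preFailures = new HashMap<>();
        for (Tuple<Integer, String> tuple : getRequests) {
            boolean exists = "doc-a".equals(tuple.v2()); // pretend doc-b is missing
            if (exists) {
                sourcesBySlot.put(tuple.v1(), "{\"source\":\"" + tuple.v2() + "\"}");
            } else {
                preFailures.put(tuple.v1(), "document [" + tuple.v2() + "] doesn't exist");
            }
        }
        System.out.println("sources: " + sourcesBySlot + ", pre-failures: " + preFailures);
    }
}
```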
} - new ASyncAction(request, percolateRequests, listener, clusterState).run(); + innerDoExecute(request, listener, getResponseSources, preFailures); } @Override @@ -134,200 +111,81 @@ public class TransportMultiPercolateAction extends HandledTransportAction finalListener; - final Map requestsByShard; - final MultiPercolateRequest multiPercolateRequest; - final List percolateRequests; - - final Map shardToSlots; - final AtomicInteger expectedOperations; - final AtomicArray reducedResponses; - final AtomicReferenceArray expectedOperationsPerItem; - final AtomicReferenceArray responsesByItemAndShard; - - ASyncAction(MultiPercolateRequest multiPercolateRequest, List percolateRequests, ActionListener finalListener, ClusterState clusterState) { - this.finalListener = finalListener; - this.multiPercolateRequest = multiPercolateRequest; - this.percolateRequests = percolateRequests; - responsesByItemAndShard = new AtomicReferenceArray<>(percolateRequests.size()); - expectedOperationsPerItem = new AtomicReferenceArray<>(percolateRequests.size()); - reducedResponses = new AtomicArray<>(percolateRequests.size()); - - // Resolving concrete indices and routing and grouping the requests by shard - requestsByShard = new HashMap<>(); - // Keep track what slots belong to what shard, in case a request to a shard fails on all copies - shardToSlots = new HashMap<>(); - int expectedResults = 0; - for (int slot = 0; slot < percolateRequests.size(); slot++) { - Object element = percolateRequests.get(slot); - assert element != null; - if (element instanceof PercolateRequest) { - PercolateRequest percolateRequest = (PercolateRequest) element; - String[] concreteIndices; - try { - concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, percolateRequest); - } catch (IndexNotFoundException e) { - reducedResponses.set(slot, e); - responsesByItemAndShard.set(slot, new AtomicReferenceArray(0)); - expectedOperationsPerItem.set(slot, new AtomicInteger(0)); - continue; - } - Map> routing = indexNameExpressionResolver.resolveSearchRouting(clusterState, percolateRequest.routing(), percolateRequest.indices()); - // TODO: I only need shardIds, ShardIterator(ShardRouting) is only needed in TransportShardMultiPercolateAction - GroupShardsIterator shards = clusterService.operationRouting().searchShards( - clusterState, concreteIndices, routing, percolateRequest.preference() - ); - if (shards.size() == 0) { - reducedResponses.set(slot, new UnavailableShardsException(null, "No shards available")); - responsesByItemAndShard.set(slot, new AtomicReferenceArray(0)); - expectedOperationsPerItem.set(slot, new AtomicInteger(0)); - continue; - } - - // The shard id is used as index in the atomic ref array, so we need to find out how many shards there are regardless of routing: - int numShards = clusterService.operationRouting().searchShardsCount(clusterState, concreteIndices, null); - responsesByItemAndShard.set(slot, new AtomicReferenceArray(numShards)); - expectedOperationsPerItem.set(slot, new AtomicInteger(shards.size())); - for (ShardIterator shard : shards) { - ShardId shardId = shard.shardId(); - TransportShardMultiPercolateAction.Request requests = requestsByShard.get(shardId); - if (requests == null) { - requestsByShard.put(shardId, requests = new TransportShardMultiPercolateAction.Request(shardId.getIndexName(), shardId.getId(), percolateRequest.preference())); - } - logger.trace("Adding shard[{}] percolate request for item[{}]", shardId, slot); - requests.add(new 
TransportShardMultiPercolateAction.Request.Item(slot, new PercolateShardRequest(shardId, percolateRequest))); - - IntArrayList items = shardToSlots.get(shardId); - if (items == null) { - shardToSlots.put(shardId, items = new IntArrayList()); - } - items.add(slot); - } - expectedResults++; - } else if (element instanceof Throwable || element instanceof MultiGetResponse.Failure) { - logger.trace("item[{}] won't be executed, reason: {}", slot, element); - reducedResponses.set(slot, element); - responsesByItemAndShard.set(slot, new AtomicReferenceArray(0)); - expectedOperationsPerItem.set(slot, new AtomicInteger(0)); - } - } - expectedOperations = new AtomicInteger(expectedResults); - } - - void run() { - if (expectedOperations.get() == 0) { - finish(); - return; - } - - logger.trace("mpercolate executing for shards {}", requestsByShard.keySet()); - for (Map.Entry entry : requestsByShard.entrySet()) { - final ShardId shardId = entry.getKey(); - TransportShardMultiPercolateAction.Request shardRequest = entry.getValue(); - shardMultiPercolateAction.execute(shardRequest, new ActionListener() { - + private void innerDoExecute(MultiPercolateRequest request, ActionListener listener, Map getResponseSources, Map preFailures) { + try { + MultiSearchRequest multiSearchRequest = createMultiSearchRequest(request, getResponseSources, preFailures); + if (multiSearchRequest.requests().isEmpty()) { + // we may failed to turn all percolate requests into search requests, + // in that case just return the response... + listener.onResponse( + createMultiPercolateResponse(new MultiSearchResponse(new MultiSearchResponse.Item[0]), request, preFailures) + ); + } else { + client.multiSearch(multiSearchRequest, new ActionListener() { @Override - public void onResponse(TransportShardMultiPercolateAction.Response response) { - onShardResponse(shardId, response); + public void onResponse(MultiSearchResponse response) { + try { + listener.onResponse(createMultiPercolateResponse(response, request, preFailures)); + } catch (Exception e) { + onFailure(e); + } } @Override public void onFailure(Throwable e) { - onShardFailure(shardId, e); + listener.onFailure(e); } - }); } + } catch (Exception e) { + listener.onFailure(e); } + } - @SuppressWarnings("unchecked") - void onShardResponse(ShardId shardId, TransportShardMultiPercolateAction.Response response) { - logger.trace("{} Percolate shard response", shardId); + private MultiSearchRequest createMultiSearchRequest(MultiPercolateRequest multiPercolateRequest, Map getResponseSources, Map preFailures) throws IOException { + MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); + multiSearchRequest.indicesOptions(multiPercolateRequest.indicesOptions()); + + for (int i = 0; i < multiPercolateRequest.requests().size(); i++) { + if (preFailures.keySet().contains(i)) { + continue; + } + + PercolateRequest percolateRequest = multiPercolateRequest.requests().get(i); + BytesReference docSource = getResponseSources.get(i); try { - for (TransportShardMultiPercolateAction.Response.Item item : response.items()) { - AtomicReferenceArray shardResults = responsesByItemAndShard.get(item.slot()); - if (shardResults == null) { - assert false : "shardResults can't be null"; - continue; - } - - if (item.failed()) { - shardResults.set(shardId.id(), new BroadcastShardOperationFailedException(shardId, item.error())); - } else { - shardResults.set(shardId.id(), item.response()); - } - - assert expectedOperationsPerItem.get(item.slot()).get() >= 1 : "slot[" + item.slot() + "] can't be lower 
than one"; - if (expectedOperationsPerItem.get(item.slot()).decrementAndGet() == 0) { - // Failure won't bubble up, since we fail the whole request now via the catch clause below, - // so expectedOperationsPerItem will not be decremented twice. - reduce(item.slot()); - } - } - } catch (Throwable e) { - logger.error("{} Percolate original reduce error", e, shardId); - finalListener.onFailure(e); + SearchRequest searchRequest = TransportPercolateAction.createSearchRequest( + percolateRequest, docSource, queryRegistry, aggParsers, parseFieldMatcher + ); + multiSearchRequest.add(searchRequest); + } catch (Exception e) { + preFailures.put(i, new MultiPercolateResponse.Item(e)); } } - @SuppressWarnings("unchecked") - void onShardFailure(ShardId shardId, Throwable e) { - logger.debug("{} Shard multi percolate failure", e, shardId); - try { - IntArrayList slots = shardToSlots.get(shardId); - for (int i = 0; i < slots.size(); i++) { - int slot = slots.get(i); - AtomicReferenceArray shardResults = responsesByItemAndShard.get(slot); - if (shardResults == null) { - continue; - } + return multiSearchRequest; + } - shardResults.set(shardId.id(), new BroadcastShardOperationFailedException(shardId, e)); - assert expectedOperationsPerItem.get(slot).get() >= 1 : "slot[" + slot + "] can't be lower than one. Caused by: " + e.getMessage(); - if (expectedOperationsPerItem.get(slot).decrementAndGet() == 0) { - reduce(slot); - } - } - } catch (Throwable t) { - logger.error("{} Percolate original reduce error, original error {}", t, shardId, e); - finalListener.onFailure(t); - } - } - - void reduce(int slot) { - AtomicReferenceArray shardResponses = responsesByItemAndShard.get(slot); - PercolateResponse reducedResponse = TransportPercolateAction.reduce((PercolateRequest) percolateRequests.get(slot), shardResponses, percolatorService); - reducedResponses.set(slot, reducedResponse); - assert expectedOperations.get() >= 1 : "slot[" + slot + "] expected options should be >= 1 but is " + expectedOperations.get(); - if (expectedOperations.decrementAndGet() == 0) { - finish(); - } - } - - void finish() { - MultiPercolateResponse.Item[] finalResponse = new MultiPercolateResponse.Item[reducedResponses.length()]; - for (int slot = 0; slot < reducedResponses.length(); slot++) { - Object element = reducedResponses.get(slot); - assert element != null : "Element[" + slot + "] shouldn't be null"; - if (element instanceof PercolateResponse) { - finalResponse[slot] = new MultiPercolateResponse.Item((PercolateResponse) element); - } else if (element instanceof Throwable) { - finalResponse[slot] = new MultiPercolateResponse.Item((Throwable)element); - } else if (element instanceof MultiGetResponse.Failure) { - finalResponse[slot] = new MultiPercolateResponse.Item(((MultiGetResponse.Failure)element).getFailure()); + private MultiPercolateResponse createMultiPercolateResponse(MultiSearchResponse multiSearchResponse, MultiPercolateRequest request, Map preFailures) { + int searchResponseIndex = 0; + MultiPercolateResponse.Item[] percolateItems = new MultiPercolateResponse.Item[request.requests().size()]; + for (int i = 0; i < percolateItems.length; i++) { + if (preFailures.keySet().contains(i)) { + percolateItems[i] = preFailures.get(i); + } else { + MultiSearchResponse.Item searchItem = multiSearchResponse.getResponses()[searchResponseIndex++]; + if (searchItem.isFailure()) { + percolateItems[i] = new MultiPercolateResponse.Item(searchItem.getFailure()); + } else { + PercolateRequest percolateRequest = request.requests().get(i); + 
percolateItems[i] = new MultiPercolateResponse.Item(TransportPercolateAction.createPercolateResponse(searchItem.getResponse(), percolateRequest.onlyCount())); } } - finalListener.onResponse(new MultiPercolateResponse(finalResponse)); } - + return new MultiPercolateResponse(percolateItems); } } diff --git a/core/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java b/core/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java index 2a8f1a4ed24..b23ef04021e 100644 --- a/core/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java +++ b/core/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java @@ -18,71 +18,74 @@ */ package org.elasticsearch.action.percolate; -import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.get.TransportGetAction; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.TransportBroadcastAction; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.engine.DocumentMissingException; -import org.elasticsearch.percolator.PercolateException; -import org.elasticsearch.percolator.PercolatorService; -import org.elasticsearch.tasks.Task; +import org.elasticsearch.common.text.Text; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.PercolatorQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.index.query.TemplateQueryParser; +import org.elasticsearch.indices.query.IndicesQueriesRegistry; +import org.elasticsearch.rest.action.support.RestActions; +import org.elasticsearch.script.Template; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.aggregations.AggregatorParsers; +import org.elasticsearch.search.aggregations.InternalAggregations; +import 
org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicReferenceArray; +import java.util.Arrays; -/** - * - */ -public class TransportPercolateAction extends TransportBroadcastAction { +public class TransportPercolateAction extends HandledTransportAction { - private final PercolatorService percolatorService; - private final TransportGetAction getAction; + private final Client client; + private final ParseFieldMatcher parseFieldMatcher; + private final IndicesQueriesRegistry queryRegistry; + private final AggregatorParsers aggParsers; @Inject - public TransportPercolateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, - TransportService transportService, PercolatorService percolatorService, - TransportGetAction getAction, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, PercolateAction.NAME, threadPool, clusterService, transportService, actionFilters, - indexNameExpressionResolver, PercolateRequest::new, PercolateShardRequest::new, ThreadPool.Names.PERCOLATE); - this.percolatorService = percolatorService; - this.getAction = getAction; + public TransportPercolateAction(Settings settings, ThreadPool threadPool, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + Client client, IndicesQueriesRegistry indicesQueriesRegistry, AggregatorParsers aggParsers) { + super(settings, PercolateAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, PercolateRequest::new); + this.client = client; + this.aggParsers = aggParsers; + this.parseFieldMatcher = new ParseFieldMatcher(settings); + this.queryRegistry = indicesQueriesRegistry; } @Override - protected void doExecute(Task task, final PercolateRequest request, final ActionListener listener) { - request.startTime = System.currentTimeMillis(); + protected void doExecute(PercolateRequest request, ActionListener listener) { if (request.getRequest() != null) { - getAction.execute(request.getRequest(), new ActionListener() { + client.get(request.getRequest(), new ActionListener() { @Override public void onResponse(GetResponse getResponse) { - if (!getResponse.isExists()) { - onFailure(new DocumentMissingException(null, request.getRequest().type(), request.getRequest().id())); - return; + if (getResponse.isExists()) { + innerDoExecute(request, getResponse.getSourceAsBytesRef(), listener); + } else { + onFailure(new ResourceNotFoundException("percolate document [{}/{}/{}] doesn't exist", request.getRequest().index(), request.getRequest().type(), request.getRequest().id())); } - - BytesReference docSource = getResponse.getSourceAsBytesRef(); - TransportPercolateAction.super.doExecute(task, new PercolateRequest(request, docSource), listener); } @Override @@ -91,99 +94,153 @@ public class TransportPercolateAction extends TransportBroadcastAction shardResults = null; - List shardFailures = null; - - boolean onlyCount = false; - for (int i = 0; i < shardsResponses.length(); i++) { - Object shardResponse = shardsResponses.get(i); - if (shardResponse == null) { - // simply ignore non active shards - } else if (shardResponse instanceof BroadcastShardOperationFailedException) { - failedShards++; - if (shardFailures == 
null) { - shardFailures = new ArrayList<>(); - } - shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse)); - } else { - PercolateShardResponse percolateShardResponse = (PercolateShardResponse) shardResponse; - successfulShards++; - if (!percolateShardResponse.isEmpty()) { - if (shardResults == null) { - onlyCount = percolateShardResponse.onlyCount(); - shardResults = new ArrayList<>(); - } - shardResults.add(percolateShardResponse); - } - } - } - - if (shardResults == null) { - long tookInMillis = Math.max(1, System.currentTimeMillis() - request.startTime); - PercolateResponse.Match[] matches = request.onlyCount() ? null : PercolateResponse.EMPTY; - return new PercolateResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures, tookInMillis, matches); - } else { - PercolatorService.ReduceResult result = null; - try { - result = percolatorService.reduce(onlyCount, shardResults); - } catch (IOException e) { - throw new ElasticsearchException("error during reduce phase", e); - } - long tookInMillis = Math.max(1, System.currentTimeMillis() - request.startTime); - return new PercolateResponse( - shardsResponses.length(), successfulShards, failedShards, shardFailures, - result.matches(), result.count(), tookInMillis, result.reducedAggregations() - ); - } - } - - @Override - protected PercolateShardRequest newShardRequest(int numShards, ShardRouting shard, PercolateRequest request) { - return new PercolateShardRequest(shard.shardId(), numShards, request); - } - - @Override - protected PercolateShardResponse newShardResponse() { - return new PercolateShardResponse(); - } - - @Override - protected GroupShardsIterator shards(ClusterState clusterState, PercolateRequest request, String[] concreteIndices) { - Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), request.indices()); - return clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference()); - } - - @Override - protected PercolateShardResponse shardOperation(PercolateShardRequest request) { + private void innerDoExecute(PercolateRequest request, BytesReference docSource, ActionListener listener) { + SearchRequest searchRequest; try { - return percolatorService.percolate(request); - } catch (Throwable e) { - logger.trace("{} failed to percolate", e, request.shardId()); - throw new PercolateException(request.shardId(), "failed to percolate", e); + searchRequest = createSearchRequest(request, docSource, queryRegistry, aggParsers, parseFieldMatcher); + } catch (IOException e) { + listener.onFailure(e); + return; } + client.search(searchRequest, new ActionListener() { + @Override + public void onResponse(SearchResponse searchResponse) { + try { + listener.onResponse(createPercolateResponse(searchResponse, request.onlyCount())); + } catch (Exception e) { + onFailure(e); + } + } + + @Override + public void onFailure(Throwable e) { + listener.onFailure(e); + } + }); + } + + public static SearchRequest createSearchRequest(PercolateRequest percolateRequest, BytesReference documentSource, IndicesQueriesRegistry queryRegistry, AggregatorParsers aggParsers, ParseFieldMatcher parseFieldMatcher) throws IOException { + SearchRequest searchRequest = new SearchRequest(); + if (percolateRequest.indices() != null) { + searchRequest.indices(percolateRequest.indices()); + } + searchRequest.indicesOptions(percolateRequest.indicesOptions()); + 
searchRequest.routing(percolateRequest.routing()); + searchRequest.preference(percolateRequest.preference()); + + BytesReference querySource = null; + XContentBuilder searchSource = XContentFactory.jsonBuilder().startObject(); + if (percolateRequest.source() != null && percolateRequest.source().length() > 0) { + try (XContentParser parser = XContentHelper.createParser(percolateRequest.source())) { + String currentFieldName = null; + XContentParser.Token token = parser.nextToken(); + if (token != XContentParser.Token.START_OBJECT) { + throw new IllegalArgumentException("Unknown token [" + token+ "]"); + } + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if ("doc".equals(currentFieldName)) { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.copyCurrentStructure(parser); + builder.flush(); + documentSource = builder.bytes(); + } else if ("query".equals(currentFieldName) || "filter".equals(currentFieldName)) { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.copyCurrentStructure(parser); + builder.flush(); + querySource = builder.bytes(); + } else if ("sort".equals(currentFieldName)) { + searchSource.field("sort"); + searchSource.copyCurrentStructure(parser); + } else if ("aggregations".equals(currentFieldName)) { + searchSource.field("aggregations"); + searchSource.copyCurrentStructure(parser); + } else if ("highlight".equals(currentFieldName)) { + searchSource.field("highlight"); + searchSource.copyCurrentStructure(parser); + } else { + throw new IllegalArgumentException("Unknown field [" + currentFieldName+ "]"); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if ("sort".equals(currentFieldName)) { + searchSource.field("sort"); + searchSource.copyCurrentStructure(parser); + } else { + throw new IllegalArgumentException("Unknown field [" + currentFieldName+ "]"); + } + } else if (token.isValue()) { + if ("size".equals(currentFieldName)) { + searchSource.field("size", parser.intValue()); + } else if ("sort".equals(currentFieldName)) { + searchSource.field("sort", parser.text()); + } else if ("track_scores".equals(currentFieldName) || "trackScores".equals(currentFieldName)) { + searchSource.field("track_scores", parser.booleanValue()); + } else { + throw new IllegalArgumentException("Unknown field [" + currentFieldName+ "]"); + } + } else { + throw new IllegalArgumentException("Unknown token [" + token + "]"); + } + } + } + } + + if (percolateRequest.onlyCount()) { + searchSource.field("size", 0); + } + + PercolatorQueryBuilder percolatorQueryBuilder = new PercolatorQueryBuilder(percolateRequest.documentType(), documentSource); + if (querySource != null) { + QueryParseContext queryParseContext = new QueryParseContext(queryRegistry); + queryParseContext.reset(XContentHelper.createParser(querySource)); + queryParseContext.parseFieldMatcher(parseFieldMatcher); + QueryBuilder queryBuilder = queryParseContext.parseInnerQueryBuilder(); + BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery(); + boolQueryBuilder.must(queryBuilder); + boolQueryBuilder.filter(percolatorQueryBuilder); + searchSource.field("query", boolQueryBuilder); + } else { + searchSource.field("query", percolatorQueryBuilder); + } + + searchSource.endObject(); + searchSource.flush(); + BytesReference source = searchSource.bytes(); + SearchSourceBuilder searchSourceBuilder = new 
SearchSourceBuilder(); + QueryParseContext context = new QueryParseContext(queryRegistry); + try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(source)) { + context.reset(parser); + context.parseFieldMatcher(parseFieldMatcher); + searchSourceBuilder.parseXContent(parser, context, aggParsers, null); + searchRequest.source(searchSourceBuilder); + return searchRequest; + } + } + + public static PercolateResponse createPercolateResponse(SearchResponse searchResponse, boolean onlyCount) { + SearchHits hits = searchResponse.getHits(); + PercolateResponse.Match[] matches; + if (onlyCount) { + matches = null; + } else { + matches = new PercolateResponse.Match[hits.getHits().length]; + for (int i = 0; i < hits.getHits().length; i++) { + SearchHit hit = hits.getHits()[i]; + matches[i] = new PercolateResponse.Match(new Text(hit.getIndex()), new Text(hit.getId()), hit.getScore(), hit.getHighlightFields()); + } + } + + return new PercolateResponse( + searchResponse.getTotalShards(), searchResponse.getSuccessfulShards(), searchResponse.getFailedShards(), + Arrays.asList(searchResponse.getShardFailures()), matches, hits.getTotalHits(), searchResponse.getTookInMillis(), (InternalAggregations) searchResponse.getAggregations() + ); } } diff --git a/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java b/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java deleted file mode 100644 index 0732d4d4066..00000000000 --- a/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java +++ /dev/null @@ -1,281 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.percolate; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.TransportActions; -import org.elasticsearch.action.support.single.shard.SingleShardRequest; -import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.ShardIterator; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.percolator.PercolatorService; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -/** - */ -public class TransportShardMultiPercolateAction extends TransportSingleShardAction { - - private final PercolatorService percolatorService; - - private static final String ACTION_NAME = MultiPercolateAction.NAME + "[shard]"; - - @Inject - public TransportShardMultiPercolateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, - TransportService transportService, PercolatorService percolatorService, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, - Request::new, ThreadPool.Names.PERCOLATE); - this.percolatorService = percolatorService; - } - - @Override - protected boolean isSubAction() { - return true; - } - - @Override - protected Response newResponse() { - return new Response(); - } - - @Override - protected boolean resolveIndex(Request request) { - return false; - } - - @Override - protected ShardIterator shards(ClusterState state, InternalRequest request) { - return clusterService.operationRouting().getShards( - state, request.concreteIndex(), request.request().shardId(), request.request().preference - ); - } - - @Override - protected Response shardOperation(Request request, ShardId shardId) { - // TODO: Look into combining the shard req's docs into one in memory index. 
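The shardOperation body that follows isolates failures per item: each slot is percolated inside its own try/catch, shard-unavailable errors are rethrown to fail the whole shard request, and anything else is recorded as a per-item failure. That pattern is worth keeping in mind for any batched transport action; a condensed sketch with hypothetical stand-in types:

```java
import java.util.ArrayList;
import java.util.List;

// Condensed sketch of the per-item error isolation used by the (now removed)
// shard-level multi-percolate action: one failing slot must not sink the batch,
// but a shard-unavailable error should still fail the whole shard request.
public class BatchedShardOperation {
    static class ItemResult {
        final int slot; final String response; final Exception error;
        ItemResult(int slot, String response, Exception error) {
            this.slot = slot; this.response = response; this.error = error;
        }
    }

    static class ShardNotAvailableException extends RuntimeException {}

    static String percolate(int slot) {
        if (slot == 1) throw new IllegalStateException("parse failure in slot 1");
        return "ok";
    }

    public static void main(String[] args) {
        List<ItemResult> items = new ArrayList<>();
        for (int slot = 0; slot < 3; slot++) {
            try {
                items.add(new ItemResult(slot, percolate(slot), null));
            } catch (ShardNotAvailableException e) {
                throw e; // fail the whole shard request so it can be retried elsewhere
            } catch (Exception e) {
                items.add(new ItemResult(slot, null, e)); // isolate per-item failures
            }
        }
        items.forEach(i -> System.out.println("slot " + i.slot + ": "
                + (i.error == null ? i.response : i.error.getMessage())));
    }
}
```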
- Response response = new Response(); - response.items = new ArrayList<>(request.items.size()); - for (Request.Item item : request.items) { - Response.Item responseItem; - int slot = item.slot; - try { - responseItem = new Response.Item(slot, percolatorService.percolate(item.request)); - } catch (Throwable t) { - if (TransportActions.isShardNotAvailableException(t)) { - throw (ElasticsearchException) t; - } else { - logger.debug("{} failed to multi percolate", t, request.shardId()); - responseItem = new Response.Item(slot, t); - } - } - response.items.add(responseItem); - } - return response; - } - - - public static class Request extends SingleShardRequest implements IndicesRequest { - - private int shardId; - private String preference; - private List items; - - public Request() { - } - - Request(String concreteIndex, int shardId, String preference) { - super(concreteIndex); - this.shardId = shardId; - this.preference = preference; - this.items = new ArrayList<>(); - } - - @Override - public ActionRequestValidationException validate() { - return super.validateNonNullIndex(); - } - - @Override - public String[] indices() { - List indices = new ArrayList<>(); - for (Item item : items) { - Collections.addAll(indices, item.request.indices()); - } - return indices.toArray(new String[indices.size()]); - } - - public int shardId() { - return shardId; - } - - public void add(Item item) { - items.add(item); - } - - public List items() { - return items; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - shardId = in.readVInt(); - preference = in.readOptionalString(); - int size = in.readVInt(); - items = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - int slot = in.readVInt(); - PercolateShardRequest shardRequest = new PercolateShardRequest(); - shardRequest.readFrom(in); - Item item = new Item(slot, shardRequest); - items.add(item); - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeVInt(shardId); - out.writeOptionalString(preference); - out.writeVInt(items.size()); - for (Item item : items) { - out.writeVInt(item.slot); - item.request.writeTo(out); - } - } - - static class Item { - - private final int slot; - private final PercolateShardRequest request; - - public Item(int slot, PercolateShardRequest request) { - this.slot = slot; - this.request = request; - } - - public int slot() { - return slot; - } - - public PercolateShardRequest request() { - return request; - } - - } - - } - - public static class Response extends ActionResponse { - - private List items; - - public List items() { - return items; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeVInt(items.size()); - for (Item item : items) { - out.writeVInt(item.slot); - if (item.response != null) { - out.writeBoolean(true); - item.response.writeTo(out); - } else { - out.writeBoolean(false); - out.writeThrowable(item.error); - } - } - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - items = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - int slot = in.readVInt(); - if (in.readBoolean()) { - PercolateShardResponse shardResponse = new PercolateShardResponse(); - shardResponse.readFrom(in); - items.add(new Item(slot, shardResponse)); - } else { - items.add(new Item(slot, in.readThrowable())); - } - } - } - - public static class Item { - - private final int slot; - 
private final PercolateShardResponse response; - private final Throwable error; - - public Item(Integer slot, PercolateShardResponse response) { - this.slot = slot; - this.response = response; - this.error = null; - } - - public Item(Integer slot, Throwable error) { - this.slot = slot; - this.error = error; - this.response = null; - } - - public int slot() { - return slot; - } - - public PercolateShardResponse response() { - return response; - } - - public Throwable error() { - return error; - } - - public boolean failed() { - return error != null; - } - } - - } - -} diff --git a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 830a54778e1..732e9098ee7 100644 --- a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -25,7 +25,6 @@ import org.apache.lucene.search.TopDocs; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.TransportActions; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -34,6 +33,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -96,7 +96,7 @@ abstract class AbstractSearchAsyncAction // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name // date math expressions and $now in scripts. 
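The TODO above argues that a request should capture a single startTime and reuse it everywhere "now" is needed. A small illustration of why, assuming a deliberately simplified resolver for date-math names such as <logs-{now/d}> (the real parsing lives in IndexNameExpressionResolver and is far richer): two resolutions around midnight only agree if the same captured timestamp is reused.

    import java.time.Instant;
    import java.time.ZoneOffset;
    import java.time.format.DateTimeFormatter;

    final class DateMathNow {
        // Hypothetical, simplified resolver: appends the UTC day for "now".
        static String resolve(String prefix, long nowMillis) {
            String day = DateTimeFormatter.ofPattern("yyyy.MM.dd")
                    .withZone(ZoneOffset.UTC)
                    .format(Instant.ofEpochMilli(nowMillis));
            return prefix + "-" + day;
        }

        public static void main(String[] args) {
            long startTime = System.currentTimeMillis(); // capture once per request
            String first = resolve("logs", startTime);
            String second = resolve("logs", startTime);  // guaranteed identical
            System.out.println(first.equals(second));    // true, even across midnight
        }
    }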
This way all apis will deal with now in the same way instead // of just for the _search api - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request.indicesOptions(), + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request.indicesOptions(), startTime(), request.indices()); for (String index : concreteIndices) { diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java index 56d0fedd40c..f7cb72b22e9 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryAndFetchAsyncAction.java @@ -21,9 +21,9 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.action.SearchTransportService; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index f2dcefa7554..c5f320f1b33 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -23,9 +23,9 @@ import com.carrotsearch.hppc.IntArrayList; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchShardTarget; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java index dcbf9b5091f..1b338847762 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryAndFetchAsyncAction.java @@ -21,9 +21,9 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.search.action.SearchTransportService; import org.elasticsearch.search.controller.SearchPhaseController; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java 
b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index e15b9da8acb..3feb40411f9 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -23,9 +23,9 @@ import com.carrotsearch.hppc.IntArrayList; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchShardTarget; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 9d3c200ed98..e87d393bf05 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -295,6 +295,13 @@ public class SearchRequest extends ActionRequest implements Indic return this.requestCache; } + /** + * @return true if the request only has suggest + */ + public boolean isSuggestOnly() { + return source != null && source.isSuggestOnly(); + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index ffca87de22c..931df24a256 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -393,9 +393,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), searchRequest.indices()); int shardCount = clusterService.operationRouting().searchShardsCount(clusterState, concreteIndices, routingMap); @@ -72,6 +73,17 @@ public class TransportSearchAction extends HandledTransportAction { - - public static final SuggestAction INSTANCE = new SuggestAction(); - public static final String NAME = "indices:data/read/suggest"; - - private SuggestAction() { - super(NAME); - } - - @Override - public SuggestResponse newResponse() { - return new SuggestResponse(new Suggest()); - } - - @Override - public SuggestRequestBuilder newRequestBuilder(ElasticsearchClient client) { - return new SuggestRequestBuilder(client, this); - } -} diff --git a/core/src/main/java/org/elasticsearch/action/suggest/SuggestRequest.java b/core/src/main/java/org/elasticsearch/action/suggest/SuggestRequest.java deleted file mode 100644 index 0d1c4932d48..00000000000 --- a/core/src/main/java/org/elasticsearch/action/suggest/SuggestRequest.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
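The new isSuggestOnly() hook above lets the transport layer special-case search requests whose body contains nothing but a suggest section, which is how the dedicated suggest action removed below gets replaced. A hedged sketch of the idea (the real check lives in SearchSourceBuilder; the field names here are illustrative, not the actual ones):

    // Sketch of the suggest-only test the search request now performs.
    final class SourceSketch {
        Object query;        // null when no query section was sent
        Object aggregations; // null when no aggregations were sent
        Object suggest;      // the suggest section, if any

        boolean isSuggestOnly() {
            // Only a suggest section is present, so the request can take the
            // fast path that replaces the old dedicated suggest action.
            return suggest != null && query == null && aggregations == null;
        }
    }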
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.suggest; - -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.support.broadcast.BroadcastRequest; -import org.elasticsearch.client.Requests; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.search.suggest.SuggestBuilder; - -import java.io.IOException; -import java.util.Arrays; - -/** - * A request to get suggestions for corrections of phrases. Best created with - * {@link org.elasticsearch.client.Requests#suggestRequest(String...)}. - *

- The request requires the suggest query source to be set either using - {@link #suggest(org.elasticsearch.common.bytes.BytesReference)} - or by using {@link #suggest(org.elasticsearch.search.suggest.SuggestBuilder)} - (best created using {@link org.elasticsearch.search.suggest.SuggestBuilders}). - * - * @see SuggestResponse - * @see org.elasticsearch.client.Client#suggest(SuggestRequest) - * @see org.elasticsearch.client.Requests#suggestRequest(String...) - * @see org.elasticsearch.search.suggest.SuggestBuilders - */ -public final class SuggestRequest extends BroadcastRequest<SuggestRequest> { - - @Nullable - private String routing; - - @Nullable - private String preference; - - private BytesReference suggestSource; - - public SuggestRequest() { - } - - /** - * Constructs a new suggest request against the provided indices. No indices provided means it will - * run against all indices. - */ - public SuggestRequest(String... indices) { - super(indices); - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = super.validate(); - return validationException; - } - - /** - * The phrase to get correction suggestions for - */ - public BytesReference suggest() { - return suggestSource; - } - - /** - * Set a new source for the suggest query - */ - public SuggestRequest suggest(BytesReference suggestSource) { - this.suggestSource = suggestSource; - return this; - } - - /** - * Set a new source using a {@link org.elasticsearch.search.suggest.SuggestBuilder} - * for phrase and term suggestion lookup - */ - public SuggestRequest suggest(SuggestBuilder suggestBuilder) { - return suggest(suggestBuilder.buildAsBytes(Requests.CONTENT_TYPE)); - } - - /** - * Set a new source using a {@link org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder} - * for completion suggestion lookup - */ - public SuggestRequest suggest(SuggestBuilder.SuggestionBuilder suggestionBuilder) { - return suggest(suggestionBuilder.buildAsBytes(Requests.CONTENT_TYPE)); - } - - public SuggestRequest suggest(String source) { - return suggest(new BytesArray(source)); - } - - /** - * A comma separated list of routing values to control the shards the search will be executed on. - */ - public String routing() { - return this.routing; - } - - /** - * A comma separated list of routing values to control the shards the search will be executed on. - */ - public SuggestRequest routing(String routing) { - this.routing = routing; - return this; - } - - /** - * The routing values to control the shards that the search will be executed on. - */ - public SuggestRequest routing(String...
routings) { - this.routing = Strings.arrayToCommaDelimitedString(routings); - return this; - } - - public SuggestRequest preference(String preference) { - this.preference = preference; - return this; - } - - public String preference() { - return this.preference; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - routing = in.readOptionalString(); - preference = in.readOptionalString(); - suggest(in.readBytesReference()); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeOptionalString(routing); - out.writeOptionalString(preference); - out.writeBytesReference(suggestSource); - } - - @Override - public String toString() { - String sSource = "_na_"; - try { - sSource = XContentHelper.convertToJson(suggestSource, false); - } catch (Exception e) { - // ignore - } - return "[" + Arrays.toString(indices) + "]" + ", suggestSource[" + sSource + "]"; - } -} diff --git a/core/src/main/java/org/elasticsearch/action/suggest/SuggestRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/suggest/SuggestRequestBuilder.java deleted file mode 100644 index 06a2b00c648..00000000000 --- a/core/src/main/java/org/elasticsearch/action/suggest/SuggestRequestBuilder.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.suggest; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.client.Requests; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.search.suggest.SuggestBuilder; -import org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder; - -import java.io.IOException; - -/** - * A suggest action request builder. - */ -public class SuggestRequestBuilder extends BroadcastOperationRequestBuilder { - - final SuggestBuilder suggest = new SuggestBuilder(); - - public SuggestRequestBuilder(ElasticsearchClient client, SuggestAction action) { - super(client, action, new SuggestRequest()); - } - - /** - * Add a definition for suggestions to the request - */ - public SuggestRequestBuilder addSuggestion(SuggestionBuilder suggestion) { - suggest.addSuggestion(suggestion); - return this; - } - - /** - * A comma separated list of routing values to control the shards the search will be executed on. 
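The varargs routing(String...) setter above simply joins its values with commas before storing them, so routing("user1", "user2") and routing("user1,user2") are equivalent; a standalone sketch of that equivalence, with Strings.arrayToCommaDelimitedString reduced to a plain join:

    final class RoutingJoin {
        static String toCommaDelimited(String... routings) {
            return String.join(",", routings);
        }

        public static void main(String[] args) {
            System.out.println(toCommaDelimited("user1", "user2")); // user1,user2
        }
    }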
- */ - public SuggestRequestBuilder setRouting(String routing) { - request.routing(routing); - return this; - } - - public SuggestRequestBuilder setSuggestText(String globalText) { - this.suggest.setText(globalText); - return this; - } - - /** - * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to - * _local to prefer local shards, _primary to execute only on primary shards, - * _shards:x,y to operate on shards x & y, or a custom value, which guarantees that the same order - * will be used across different requests. - */ - public SuggestRequestBuilder setPreference(String preference) { - request.preference(preference); - return this; - } - - /** - * The routing values to control the shards that the search will be executed on. - */ - public SuggestRequestBuilder setRouting(String... routing) { - request.routing(routing); - return this; - } - - @Override - protected SuggestRequest beforeExecute(SuggestRequest request) { - try { - XContentBuilder builder = XContentFactory.contentBuilder(Requests.CONTENT_TYPE); - suggest.toXContent(builder, ToXContent.EMPTY_PARAMS); - request.suggest(builder.bytes()); - } catch (IOException e) { - throw new ElasticsearchException("Unable to build suggestion request", e); - } - return request; - } -} diff --git a/core/src/main/java/org/elasticsearch/action/suggest/SuggestResponse.java b/core/src/main/java/org/elasticsearch/action/suggest/SuggestResponse.java deleted file mode 100644 index 445e804b5b5..00000000000 --- a/core/src/main/java/org/elasticsearch/action/suggest/SuggestResponse.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.suggest; - -import org.elasticsearch.action.ShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BroadcastResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.search.suggest.Suggest; - -import java.io.IOException; -import java.util.List; - -import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; - -/** - * The response of the suggest action. - */ -public final class SuggestResponse extends BroadcastResponse { - - private final Suggest suggest; - - SuggestResponse(Suggest suggest) { - this.suggest = suggest; - } - - SuggestResponse(Suggest suggest, int totalShards, int successfulShards, int failedShards, List shardFailures) { - super(totalShards, successfulShards, failedShards, shardFailures); - this.suggest = suggest; - } - - /** - * The Suggestions of the phrase. 
- */ - public Suggest getSuggest() { - return suggest; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - this.suggest.readFrom(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - this.suggest.writeTo(out); - } - - @Override - public String toString() { - try { - XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); - builder.startObject(); - suggest.toXContent(builder, EMPTY_PARAMS); - builder.endObject(); - return builder.string(); - } catch (IOException e) { - return "{ \"error\" : \"" + e.getMessage() + "\"}"; - } - } -} diff --git a/core/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java b/core/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java deleted file mode 100644 index 0ed98578557..00000000000 --- a/core/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.suggest; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ShardOperationFailedException; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.TransportBroadcastAction; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.suggest.stats.ShardSuggestMetric; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.search.suggest.Suggest; -import org.elasticsearch.search.suggest.SuggestPhase; -import org.elasticsearch.search.suggest.SuggestionSearchContext; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicReferenceArray; - -/** - * Defines the transport of a suggestion request across the cluster - */ -public class TransportSuggestAction extends TransportBroadcastAction { - - private final IndicesService indicesService; - private final SuggestPhase suggestPhase; - - @Inject - public TransportSuggestAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, - IndicesService indicesService, SuggestPhase suggestPhase, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, SuggestAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, - SuggestRequest::new, ShardSuggestRequest::new, ThreadPool.Names.SUGGEST); - this.indicesService = indicesService; - this.suggestPhase = suggestPhase; - } - - @Override - protected ShardSuggestRequest newShardRequest(int numShards, ShardRouting shard, SuggestRequest request) { - return new ShardSuggestRequest(shard.shardId(), request); - } - - @Override - protected ShardSuggestResponse newShardResponse() { - return new ShardSuggestResponse(); - } - - @Override - protected GroupShardsIterator shards(ClusterState clusterState, SuggestRequest request, String[] concreteIndices) { - Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), request.indices()); - return clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference()); - } - - @Override - protected ClusterBlockException checkGlobalBlock(ClusterState state, SuggestRequest request) { - return 
state.blocks().globalBlockedException(ClusterBlockLevel.READ); - } - - @Override - protected ClusterBlockException checkRequestBlock(ClusterState state, SuggestRequest countRequest, String[] concreteIndices) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices); - } - - @Override - protected SuggestResponse newResponse(SuggestRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) { - int successfulShards = 0; - int failedShards = 0; - - final Map> groupedSuggestions = new HashMap<>(); - - List shardFailures = null; - for (int i = 0; i < shardsResponses.length(); i++) { - Object shardResponse = shardsResponses.get(i); - if (shardResponse == null) { - // simply ignore non active shards - } else if (shardResponse instanceof BroadcastShardOperationFailedException) { - failedShards++; - if (shardFailures == null) { - shardFailures = new ArrayList<>(); - } - shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse)); - } else { - Suggest suggest = ((ShardSuggestResponse) shardResponse).getSuggest(); - Suggest.group(groupedSuggestions, suggest); - successfulShards++; - } - } - - return new SuggestResponse(new Suggest(Suggest.reduce(groupedSuggestions)), shardsResponses.length(), successfulShards, failedShards, shardFailures); - } - - @Override - protected ShardSuggestResponse shardOperation(ShardSuggestRequest request) { - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.getShard(request.shardId().id()); - ShardSuggestMetric suggestMetric = indexShard.getSuggestMetric(); - suggestMetric.preSuggest(); - long startTime = System.nanoTime(); - XContentParser parser = null; - try (Engine.Searcher searcher = indexShard.acquireSearcher("suggest")) { - BytesReference suggest = request.suggest(); - if (suggest != null && suggest.length() > 0) { - parser = XContentFactory.xContent(suggest).createParser(suggest); - if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new IllegalArgumentException("suggest content missing"); - } - final SuggestionSearchContext context = suggestPhase.parseElement().parseInternal(parser, indexService.mapperService(), - indexService.fieldData(), request.shardId().getIndexName(), request.shardId().id()); - final Suggest result = suggestPhase.execute(context, searcher.searcher()); - return new ShardSuggestResponse(request.shardId(), result); - } - return new ShardSuggestResponse(request.shardId(), new Suggest()); - } catch (Throwable ex) { - throw new ElasticsearchException("failed to execute suggest", ex); - } finally { - if (parser != null) { - parser.close(); - } - suggestMetric.postSuggest(System.nanoTime() - startTime); - } - } -} diff --git a/core/src/main/java/org/elasticsearch/action/suggest/package-info.java b/core/src/main/java/org/elasticsearch/action/suggest/package-info.java deleted file mode 100644 index a2c0f48ea51..00000000000 --- a/core/src/main/java/org/elasticsearch/action/suggest/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
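The newResponse method above walks the per-shard responses, counts successes and failures, groups each shard's suggestions by suggestion name via Suggest.group, and reduces the groups into one Suggest. A simplified, self-contained version of that group-then-reduce step (the real reduce also merges, re-sorts and trims the options; plain strings stand in for suggestion entries):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    final class GroupAndReduce {
        // Each shard result maps a suggestion name to its candidate options.
        static Map<String, List<String>> group(List<Map<String, List<String>>> shardResults) {
            Map<String, List<String>> grouped = new HashMap<>();
            for (Map<String, List<String>> shard : shardResults) {
                for (Map.Entry<String, List<String>> entry : shard.entrySet()) {
                    // collect all shards' options under the suggestion's name
                    grouped.computeIfAbsent(entry.getKey(), k -> new ArrayList<>())
                           .addAll(entry.getValue());
                }
            }
            return grouped;
        }
    }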
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * Suggest action. - */ -package org.elasticsearch.action.suggest; \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java b/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java index d5574755346..339abcb22bc 100644 --- a/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java +++ b/core/src/main/java/org/elasticsearch/action/support/AutoCreateIndex.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.MapperService; @@ -39,7 +40,8 @@ import java.util.List; */ public final class AutoCreateIndex { - public static final Setting AUTO_CREATE_INDEX_SETTING = new Setting<>("action.auto_create_index", "true", AutoCreate::new, false, Setting.Scope.CLUSTER); + public static final Setting AUTO_CREATE_INDEX_SETTING = + new Setting<>("action.auto_create_index", "true", AutoCreate::new, Property.NodeScope); private final boolean dynamicMappingDisabled; private final IndexNameExpressionResolver resolver; diff --git a/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java b/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java index 5f2fb33e043..31fc1d06175 100644 --- a/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java +++ b/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; /** @@ -33,7 +34,8 @@ public final class DestructiveOperations extends AbstractComponent { /** * Setting which controls whether wildcard usage (*, prefix*, _all) is allowed. 
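Settings declarations throughout this changeset move from positional boolean flags for "dynamic" and scope to the explicit Setting.Property enum, as in the REQUIRES_NAME_SETTING rewrite just below. A short usage sketch against the API as it appears in this diff (a minimal example, not the full registration wiring, which goes through ClusterSettings):

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Setting.Property;
    import org.elasticsearch.common.settings.Settings;

    final class DestructiveFlag {
        // Mirrors the declaration in the diff: a dynamic, node-scoped boolean.
        static final Setting<Boolean> REQUIRES_NAME_SETTING =
            Setting.boolSetting("action.destructive_requires_name", false, Property.Dynamic, Property.NodeScope);

        public static void main(String[] args) {
            Settings settings = Settings.builder()
                .put("action.destructive_requires_name", true)
                .build();
            // typed access replaces ad-hoc string lookups
            boolean requiresName = REQUIRES_NAME_SETTING.get(settings);
            System.out.println(requiresName); // true
        }
    }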
*/ - public static final Setting REQUIRES_NAME_SETTING = Setting.boolSetting("action.destructive_requires_name", false, true, Setting.Scope.CLUSTER); + public static final Setting REQUIRES_NAME_SETTING = + Setting.boolSetting("action.destructive_requires_name", false, Property.Dynamic, Property.NodeScope); private volatile boolean destructiveRequiresName; @Inject diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java index e39fb0288ac..182d922fc39 100644 --- a/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java @@ -24,7 +24,6 @@ import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.TransportActions; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -33,6 +32,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; @@ -125,7 +125,7 @@ public abstract class TransportBroadcastAction, Response extends ActionResponse> extends TransportMasterNodeAction { - public static final Setting FORCE_LOCAL_SETTING = Setting.boolSetting("action.master.force_local", false, false, Setting.Scope.CLUSTER); + public static final Setting FORCE_LOCAL_SETTING = + Setting.boolSetting("action.master.force_local", false, Property.NodeScope); private final boolean forceLocal; diff --git a/core/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java b/core/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java index 7e42036c1d1..66b9fce5d71 100644 --- a/core/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java @@ -22,9 +22,9 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -50,7 +50,7 @@ public abstract class TransportClusterInfoAction listener) { - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request); doMasterOperation(request, concreteIndices, state, 
listener); } diff --git a/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java index 7e2702afd8a..9c021efbe40 100644 --- a/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java @@ -26,11 +26,11 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ChildTaskRequest; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java index 6283e69a02e..b4cfbb6ad88 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java @@ -51,6 +51,8 @@ public abstract class ReplicationRequest shards(Request request, ClusterState clusterState) { List shardIds = new ArrayList<>(); - String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request); + String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request); for (String index : concreteIndices) { IndexMetaData indexMetaData = clusterState.metaData().getIndices().get(index); if (indexMetaData != null) { diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 96b70a08b83..4462caac0d0 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -28,10 +28,8 @@ import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.support.TransportActions; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; -import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -43,6 +41,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.Tuple; import 
org.elasticsearch.common.io.stream.StreamInput; @@ -103,7 +102,6 @@ public abstract class TransportReplicationAction request, Supplier replicaRequest, String executor) { super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver, transportService.getTaskManager()); @@ -121,7 +119,6 @@ public abstract class TransportReplicationAction primaryResponse = shardOperationOnPrimary(state.metaData(), request); + primaryResponse.v2().primaryTerm(indexShardReference.opPrimaryTerm()); if (logger.isTraceEnabled()) { logger.trace("action [{}] completed on shard [{}] for request [{}] with cluster state version [{}]", transportPrimaryAction, shardId, request, state.version()); } @@ -864,7 +836,7 @@ public abstract class TransportReplicationAction() { diff --git a/core/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java b/core/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java index 2d13b7f99ac..0c7f0627c66 100644 --- a/core/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java @@ -25,7 +25,6 @@ import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.support.TransportActions; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -34,6 +33,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.settings.Settings; @@ -141,7 +141,7 @@ public abstract class TransportSingleShardAction> extends @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; - if (taskId.isSet() == false && nodesIds.length > 0) { + if (taskId.isSet() && nodesIds.length > 0) { validationException = addValidationError("task id cannot be used together with node ids", validationException); } @@ -71,7 +71,7 @@ public class BaseTasksRequest> extends * Sets the list of action masks for the actions that should be returned */ @SuppressWarnings("unchecked") - public final Request actions(String... actions) { + public final Request setActions(String... actions) { this.actions = actions; return (Request) this; } @@ -79,16 +79,16 @@ public class BaseTasksRequest> extends /** * Return the list of action masks for the actions that should be returned */ - public String[] actions() { + public String[] getActions() { return actions; } - public final String[] nodesIds() { + public final String[] getNodesIds() { return nodesIds; } @SuppressWarnings("unchecked") - public final Request nodesIds(String... nodesIds) { + public final Request setNodesIds(String... nodesIds) { this.nodesIds = nodesIds; return (Request) this; } @@ -98,12 +98,12 @@ public class BaseTasksRequest> extends * * By default tasks with any ids are returned. 
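With the BaseTasksRequest accessors renamed to get/set style, callers move to the fluent form shown below; a hedged usage sketch (ListTasksRequest is one concrete subclass of BaseTasksRequest in this codebase, and its package path is assumed from this era):

    import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
    import org.elasticsearch.common.unit.TimeValue;

    final class TasksRequestUsage {
        static ListTasksRequest buildRequest() {
            return new ListTasksRequest()
                .setActions("indices:data/read/search*")     // filter by action mask
                .setNodesIds("node-1", "node-2")             // restrict to these nodes
                .setTimeout(TimeValue.timeValueSeconds(10)); // per-node timeout
        }
    }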
*/ - public TaskId taskId() { + public TaskId getTaskId() { return taskId; } @SuppressWarnings("unchecked") - public final Request taskId(TaskId taskId) { + public final Request setTaskId(TaskId taskId) { this.taskId = taskId; return (Request) this; } @@ -112,29 +112,29 @@ public class BaseTasksRequest> extends /** * Returns the parent task id that tasks should be filtered by */ - public TaskId parentTaskId() { + public TaskId getParentTaskId() { return parentTaskId; } @SuppressWarnings("unchecked") - public Request parentTaskId(TaskId parentTaskId) { + public Request setParentTaskId(TaskId parentTaskId) { this.parentTaskId = parentTaskId; return (Request) this; } - public TimeValue timeout() { + public TimeValue getTimeout() { return this.timeout; } @SuppressWarnings("unchecked") - public final Request timeout(TimeValue timeout) { + public final Request setTimeout(TimeValue timeout) { this.timeout = timeout; return (Request) this; } @SuppressWarnings("unchecked") - public final Request timeout(String timeout) { + public final Request setTimeout(String timeout) { this.timeout = TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout"); return (Request) this; } @@ -162,15 +162,15 @@ public class BaseTasksRequest> extends } public boolean match(Task task) { - if (actions() != null && actions().length > 0 && Regex.simpleMatch(actions(), task.getAction()) == false) { + if (getActions() != null && getActions().length > 0 && Regex.simpleMatch(getActions(), task.getAction()) == false) { return false; } - if (taskId().isSet() == false) { - if(taskId().getId() != task.getId()) { + if (getTaskId().isSet()) { + if(getTaskId().getId() != task.getId()) { return false; } } - if (parentTaskId.isSet() == false) { + if (parentTaskId.isSet()) { if (parentTaskId.equals(task.getParentTaskId()) == false) { return false; } diff --git a/core/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java index a7265ce9998..a510a847c62 100644 --- a/core/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java @@ -35,19 +35,19 @@ public class TasksRequestBuilder , Res @SuppressWarnings("unchecked") public final RequestBuilder setNodesIds(String... nodesIds) { - request.nodesIds(nodesIds); + request.setNodesIds(nodesIds); return (RequestBuilder) this; } @SuppressWarnings("unchecked") public final RequestBuilder setActions(String... 
actions) { - request.actions(actions); + request.setActions(actions); return (RequestBuilder) this; } @SuppressWarnings("unchecked") public final RequestBuilder setTimeout(TimeValue timeout) { - request.timeout(timeout); + request.setTimeout(timeout); return (RequestBuilder) this; } } diff --git a/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java index f10b9f23327..ad7702466cd 100644 --- a/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java @@ -28,11 +28,11 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ChildTaskRequest; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -124,25 +124,25 @@ public abstract class TransportTasksAction< } protected String[] resolveNodes(TasksRequest request, ClusterState clusterState) { - if (request.taskId().isSet()) { - return clusterState.nodes().resolveNodesIds(request.nodesIds()); + if (request.getTaskId().isSet()) { + return new String[]{request.getTaskId().getNodeId()}; } else { - return new String[]{request.taskId().getNodeId()}; + return clusterState.nodes().resolveNodesIds(request.getNodesIds()); } } protected void processTasks(TasksRequest request, Consumer operation) { - if (request.taskId().isSet() == false) { + if (request.getTaskId().isSet()) { // we are only checking one task, we can optimize it - Task task = taskManager.getTask(request.taskId().getId()); + Task task = taskManager.getTask(request.getTaskId().getId()); if (task != null) { if (request.match(task)) { operation.accept((OperationTask) task); } else { - throw new ResourceNotFoundException("task [{}] doesn't support this operation", request.taskId()); + throw new ResourceNotFoundException("task [{}] doesn't support this operation", request.getTaskId()); } } else { - throw new ResourceNotFoundException("task [{}] is missing", request.taskId()); + throw new ResourceNotFoundException("task [{}] is missing", request.getTaskId()); } } else { for (Task task : taskManager.getTasks().values()) { @@ -207,8 +207,8 @@ public abstract class TransportTasksAction< this.nodesIds = filterNodeIds(clusterState.nodes(), nodesIds); ImmutableOpenMap nodes = clusterState.nodes().nodes(); this.nodes = new DiscoveryNode[nodesIds.length]; - for (int i = 0; i < nodesIds.length; i++) { - this.nodes[i] = nodes.get(nodesIds[i]); + for (int i = 0; i < this.nodesIds.length; i++) { + this.nodes[i] = nodes.get(this.nodesIds[i]); } this.responses = new AtomicReferenceArray<>(this.nodesIds.length); } @@ -224,8 +224,8 @@ public abstract class TransportTasksAction< } } else { TransportRequestOptions.Builder builder = TransportRequestOptions.builder(); - if (request.timeout() != null) { - builder.withTimeout(request.timeout()); + if (request.getTimeout() != null) { + 
builder.withTimeout(request.getTimeout()); } builder.withCompress(transportCompress()); for (int i = 0; i < nodesIds.length; i++) { diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java b/core/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java index 7047ee69040..d71958cefde 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java @@ -20,13 +20,12 @@ package org.elasticsearch.action.termvectors; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.DocumentRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -72,7 +71,7 @@ public class TransportMultiTermVectorsAction extends HandledTransportAction listener, final int retryCount) { - final IndexService indexService = indicesService.indexServiceSafe(request.concreteIndex()); - final IndexShard indexShard = indexService.getShard(request.shardId()); + final ShardId shardId = request.getShardId(); + final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + final IndexShard indexShard = indexService.getShard(shardId.getId()); final UpdateHelper.Result result = updateHelper.prepare(request, indexShard); switch (result.operation()) { case UPSERT: @@ -194,7 +196,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio if (e instanceof VersionConflictEngineException) { if (retryCount < request.retryOnConflict()) { logger.trace("Retry attempt [{}] of [{}] on version conflict on [{}][{}][{}]", - retryCount + 1, request.retryOnConflict(), request.index(), request.shardId(), request.id()); + retryCount + 1, request.retryOnConflict(), request.index(), request.getShardId(), request.id()); threadPool.executor(executor()).execute(new ActionRunnable(listener) { @Override protected void doRun() { @@ -267,9 +269,9 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio break; case NONE: UpdateResponse update = result.action(); - IndexService indexServiceOrNull = indicesService.indexService(request.concreteIndex()); + IndexService indexServiceOrNull = indicesService.indexService(shardId.getIndex()); if (indexServiceOrNull != null) { - IndexShard shard = indexService.getShardOrNull(request.shardId()); + IndexShard shard = indexService.getShardOrNull(shardId.getId()); if (shard != null) { shard.noopUpdate(request.type()); } diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 6bc69ed4d9c..0877ea1c66b 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import 
org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptParameterParser; import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue; @@ -43,6 +44,7 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptService.ScriptType; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -88,7 +90,7 @@ public class UpdateRequest extends InstanceShardOperationRequest } public UpdateRequest(String index, String type, String id) { - this.index = index; + super(index); this.type = type; this.id = id; } @@ -195,7 +197,7 @@ public class UpdateRequest extends InstanceShardOperationRequest return parent; } - int shardId() { + public ShardId getShardId() { return this.shardId; } @@ -670,9 +672,15 @@ public class UpdateRequest extends InstanceShardOperationRequest } else if ("detect_noop".equals(currentFieldName)) { detectNoop(parser.booleanValue()); } else if ("fields".equals(currentFieldName)) { - List values = parser.list(); - String[] fields = values.toArray(new String[values.size()]); - fields(fields); + List fields = null; + if (token == XContentParser.Token.START_ARRAY) { + fields = (List) parser.list(); + } else if (token.isValue()) { + fields = Collections.singletonList(parser.text()); + } + if (fields != null) { + fields(fields.toArray(new String[fields.size()])); + } } else { //here we don't have settings available, unable to throw deprecation exceptions scriptParameterParser.token(currentFieldName, token, parser, ParseFieldMatcher.EMPTY); diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 215659054d2..2cb4fb6450a 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -24,10 +24,10 @@ import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.StringHelper; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; +import org.elasticsearch.cli.Terminal; import org.elasticsearch.common.PidFile; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.Terminal; import org.elasticsearch.common.inject.CreationException; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.LogConfigurator; @@ -45,10 +45,9 @@ import java.io.IOException; import java.io.PrintStream; import java.nio.file.Path; import java.util.Locale; +import java.util.Map; import java.util.concurrent.CountDownLatch; -import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; - /** * Internal startup code. */ @@ -136,6 +135,8 @@ final class Bootstrap { JNANatives.trySetMaxNumberOfThreads(); + JNANatives.trySetMaxSizeVirtualMemory(); + // init lucene random seed. it will use /dev/urandom where available: StringHelper.randomId(); } @@ -189,9 +190,13 @@ final class Bootstrap { node = new Node(nodeSettings); } - private static Environment initialSettings(boolean foreground) { + private static Environment initialSettings(boolean foreground, String pidFile) { Terminal terminal = foreground ? 
Terminal.DEFAULT : null; - return InternalSettingsPreparer.prepareEnvironment(EMPTY_SETTINGS, terminal); + Settings.Builder builder = Settings.builder(); + if (Strings.hasLength(pidFile)) { + builder.put(Environment.PIDFILE_SETTING.getKey(), pidFile); + } + return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal); } private void start() { @@ -218,22 +223,18 @@ final class Bootstrap { * This method is invoked by {@link Elasticsearch#main(String[])} * to startup elasticsearch. */ - static void init(String[] args) throws Throwable { + static void init( + final boolean foreground, + final String pidFile, + final Map esSettings) throws Throwable { // Set the system property before anything has a chance to trigger its use initLoggerPrefix(); - BootstrapCLIParser bootstrapCLIParser = new BootstrapCLIParser(); - CliTool.ExitStatus status = bootstrapCLIParser.execute(args); - - if (CliTool.ExitStatus.OK != status) { - exit(status.status()); - } + elasticsearchSettings(esSettings); INSTANCE = new Bootstrap(); - boolean foreground = !"false".equals(System.getProperty("es.foreground", System.getProperty("es-foreground"))); - - Environment environment = initialSettings(foreground); + Environment environment = initialSettings(foreground, pidFile); Settings settings = environment.settings(); LogConfigurator.configure(settings, true); checkForCustomConfFile(); @@ -297,6 +298,13 @@ final class Bootstrap { } } + @SuppressForbidden(reason = "Sets system properties passed as CLI parameters") + private static void elasticsearchSettings(Map esSettings) { + for (Map.Entry esSetting : esSettings.entrySet()) { + System.setProperty(esSetting.getKey(), esSetting.getValue()); + } + } + @SuppressForbidden(reason = "System#out") private static void closeSystOut() { System.out.close(); @@ -307,14 +315,6 @@ final class Bootstrap { System.err.close(); } - @SuppressForbidden(reason = "System#err") - private static void sysError(String line, boolean flush) { - System.err.println(line); - if (flush) { - System.err.flush(); - } - } - private static void checkForCustomConfFile() { String confFileSetting = System.getProperty("es.default.config"); checkUnsetAndMaybeExit(confFileSetting, "es.default.config"); diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCLIParser.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCLIParser.java deleted file mode 100644 index 25ae53873fe..00000000000 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCLIParser.java +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.bootstrap; - -import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.Option; -import org.elasticsearch.Build; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.CliToolConfig; -import org.elasticsearch.common.cli.UserError; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.monitor.jvm.JvmInfo; - -import java.util.HashMap; -import java.util.Iterator; -import java.util.Locale; -import java.util.Map; -import java.util.Properties; - -import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd; -import static org.elasticsearch.common.cli.CliToolConfig.Builder.optionBuilder; - -final class BootstrapCLIParser extends CliTool { - - private static final CliToolConfig CONFIG = CliToolConfig.config("elasticsearch", BootstrapCLIParser.class) - .cmds(Start.CMD, Version.CMD) - .build(); - - public BootstrapCLIParser() { - super(CONFIG); - } - - public BootstrapCLIParser(Terminal terminal) { - super(CONFIG, terminal); - } - - @Override - protected Command parse(String cmdName, CommandLine cli) throws Exception { - switch (cmdName.toLowerCase(Locale.ROOT)) { - case Start.NAME: - return Start.parse(terminal, cli); - case Version.NAME: - return Version.parse(terminal, cli); - default: - assert false : "should never get here, if the user enters an unknown command, an error message should be shown before parse is called"; - return null; - } - } - - static class Version extends CliTool.Command { - - private static final String NAME = "version"; - - private static final CliToolConfig.Cmd CMD = cmd(NAME, Version.class).build(); - - public static Command parse(Terminal terminal, CommandLine cli) { - return new Version(terminal); - } - - public Version(Terminal terminal) { - super(terminal); - } - - @Override - public ExitStatus execute(Settings settings, Environment env) throws Exception { - terminal.println("Version: " + org.elasticsearch.Version.CURRENT - + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date() - + ", JVM: " + JvmInfo.jvmInfo().version()); - return ExitStatus.OK_AND_EXIT; - } - } - - static class Start extends CliTool.Command { - - private static final String NAME = "start"; - - private static final CliToolConfig.Cmd CMD = cmd(NAME, Start.class) - .options( - optionBuilder("d", "daemonize").hasArg(false).required(false), - optionBuilder("p", "pidfile").hasArg(true).required(false), - optionBuilder("V", "version").hasArg(false).required(false), - Option.builder("D").argName("property=value").valueSeparator('=').numberOfArgs(2) - ) - .stopAtNonOption(true) // needed to parse the --foo.bar options, so this parser must be lenient - .build(); - - // TODO: don't use system properties as a way to do this, its horrible... 
- @SuppressForbidden(reason = "Sets system properties passed as CLI parameters") - public static Command parse(Terminal terminal, CommandLine cli) throws UserError { - if (cli.hasOption("V")) { - return Version.parse(terminal, cli); - } - - if (cli.hasOption("d")) { - System.setProperty("es.foreground", "false"); - } - - String pidFile = cli.getOptionValue("pidfile"); - if (!Strings.isNullOrEmpty(pidFile)) { - System.setProperty("es.pidfile", pidFile); - } - - if (cli.hasOption("D")) { - Properties properties = cli.getOptionProperties("D"); - for (Map.Entry entry : properties.entrySet()) { - String key = (String) entry.getKey(); - String propertyName = key.startsWith("es.") ? key : "es." + key; - System.setProperty(propertyName, entry.getValue().toString()); - } - } - - // hacky way to extract all the fancy extra args, there is no CLI tool helper for this - Iterator iterator = cli.getArgList().iterator(); - final Map properties = new HashMap<>(); - while (iterator.hasNext()) { - String arg = iterator.next(); - if (!arg.startsWith("--")) { - if (arg.startsWith("-D") || arg.startsWith("-d") || arg.startsWith("-p")) { - throw new UserError(ExitStatus.USAGE, - "Parameter [" + arg + "] starting with \"-D\", \"-d\" or \"-p\" must be before any parameters starting with --" - ); - } else { - throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "]does not start with --"); - } - } - // if there is no = sign, we have to get the next argu - arg = arg.replace("--", ""); - if (arg.contains("=")) { - String[] splitArg = arg.split("=", 2); - String key = splitArg[0]; - String value = splitArg[1]; - properties.put("es." + key, value); - } else { - if (iterator.hasNext()) { - String value = iterator.next(); - if (value.startsWith("--")) { - throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value"); - } - properties.put("es." + arg, value); - } else { - throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value"); - } - } - } - for (Map.Entry entry : properties.entrySet()) { - System.setProperty(entry.getKey(), entry.getValue()); - } - return new Start(terminal); - } - - public Start(Terminal terminal) { - super(terminal); - - } - - @Override - public ExitStatus execute(Settings settings, Environment env) throws Exception { - return ExitStatus.OK; - } - } - -} diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java index 433dd4498a4..dcb88dca848 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.monitor.process.ProcessProbe; import org.elasticsearch.transport.TransportSettings; @@ -39,7 +40,6 @@ import java.util.Set; /** * We enforce limits once any network host is configured. In this case we assume the node is running in production * and all production limit checks must pass. 
This should be extended as we go to settings like: - * - discovery.zen.minimum_master_nodes * - discovery.zen.ping.unicast.hosts is set if we use zen disco * - ensure we can write in all data directories * - fail if vm.max_map_count is under a certain limit (not sure if this works cross platform) @@ -114,15 +114,19 @@ final class BootstrapCheck { } // the list of checks to execute - private static List checks(final Settings settings) { + static List checks(final Settings settings) { final List checks = new ArrayList<>(); final FileDescriptorCheck fileDescriptorCheck - = Constants.MAC_OS_X ? new OsXFileDescriptorCheck() : new FileDescriptorCheck(); + = Constants.MAC_OS_X ? new OsXFileDescriptorCheck() : new FileDescriptorCheck(); checks.add(fileDescriptorCheck); checks.add(new MlockallCheck(BootstrapSettings.MLOCKALL_SETTING.get(settings))); if (Constants.LINUX) { checks.add(new MaxNumberOfThreadsCheck()); } + if (Constants.LINUX || Constants.MAC_OS_X) { + checks.add(new MaxSizeVirtualMemoryCheck()); + } + checks.add(new MinMasterNodesCheck(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.exists(settings))); return Collections.unmodifiableList(checks); } @@ -183,10 +187,10 @@ final class BootstrapCheck { @Override public final String errorMessage() { return String.format( - Locale.ROOT, - "max file descriptors [%d] for elasticsearch process likely too low, increase to at least [%d]", - getMaxFileDescriptorCount(), - limit + Locale.ROOT, + "max file descriptors [%d] for elasticsearch process likely too low, increase to at least [%d]", + getMaxFileDescriptorCount(), + limit ); } @@ -223,6 +227,26 @@ final class BootstrapCheck { } + static class MinMasterNodesCheck implements Check { + + final boolean minMasterNodesIsSet; + + MinMasterNodesCheck(boolean minMasterNodesIsSet) { + this.minMasterNodesIsSet = minMasterNodesIsSet; + } + + @Override + public boolean check() { + return minMasterNodesIsSet == false; + } + + @Override + public String errorMessage() { + return "please set [" + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + + "] to a majority of the number of master eligible nodes in your cluster."; + } + } + static class MaxNumberOfThreadsCheck implements Check { private final long maxNumberOfThreadsThreshold = 1 << 11; @@ -249,4 +273,32 @@ final class BootstrapCheck { } + static class MaxSizeVirtualMemoryCheck implements Check { + + @Override + public boolean check() { + return getMaxSizeVirtualMemory() != Long.MIN_VALUE && getMaxSizeVirtualMemory() != getRlimInfinity(); + } + + @Override + public String errorMessage() { + return String.format( + Locale.ROOT, + "max size virtual memory [%d] for user [%s] likely too low, increase to [unlimited]", + getMaxSizeVirtualMemory(), + BootstrapInfo.getSystemProperties().get("user.name")); + } + + // visible for testing + long getRlimInfinity() { + return JNACLibrary.RLIM_INFINITY; + } + + // visible for testing + long getMaxSizeVirtualMemory() { + return JNANatives.MAX_SIZE_VIRTUAL_MEMORY; + } + + } + } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java index a20ff9bb059..4e9dffc995b 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapSettings.java @@ -20,7 +20,7 @@ package org.elasticsearch.bootstrap; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Scope; 
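One contract worth spelling out for reviewers: in this Check interface, check() returning true means the check has failed and errorMessage() will be reported, which is exactly how the new MinMasterNodesCheck and MaxSizeVirtualMemoryCheck above behave. A minimal sketch of a custom check under that convention (the heap rule itself is hypothetical, not part of this change):

    // Hypothetical bootstrap check: "fails" (returns true) when the JVM heap
    // is below a threshold, following the same true-means-broken convention.
    static class ExampleHeapCheck implements BootstrapCheck.Check {
        private final long minHeapBytes;

        ExampleHeapCheck(long minHeapBytes) {
            this.minHeapBytes = minHeapBytes;
        }

        @Override
        public boolean check() {
            return Runtime.getRuntime().maxMemory() < minHeapBytes;
        }

        @Override
        public String errorMessage() {
            return "heap size [" + Runtime.getRuntime().maxMemory() +
                    "] likely too low, increase to at least [" + minHeapBytes + "]";
        }
    }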
+import org.elasticsearch.common.settings.Setting.Property; public final class BootstrapSettings { @@ -29,10 +29,13 @@ public final class BootstrapSettings { // TODO: remove this hack when insecure defaults are removed from java public static final Setting SECURITY_FILTER_BAD_DEFAULTS_SETTING = - Setting.boolSetting("security.manager.filter_bad_defaults", true, false, Scope.CLUSTER); + Setting.boolSetting("security.manager.filter_bad_defaults", true, Property.NodeScope); - public static final Setting MLOCKALL_SETTING = Setting.boolSetting("bootstrap.mlockall", false, false, Scope.CLUSTER); - public static final Setting SECCOMP_SETTING = Setting.boolSetting("bootstrap.seccomp", true, false, Scope.CLUSTER); - public static final Setting CTRLHANDLER_SETTING = Setting.boolSetting("bootstrap.ctrlhandler", true, false, Scope.CLUSTER); + public static final Setting MLOCKALL_SETTING = + Setting.boolSetting("bootstrap.mlockall", false, Property.NodeScope); + public static final Setting SECCOMP_SETTING = + Setting.boolSetting("bootstrap.seccomp", true, Property.NodeScope); + public static final Setting CTRLHANDLER_SETTING = + Setting.boolSetting("bootstrap.ctrlhandler", true, Property.NodeScope); } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index 107a955696c..bb1f6cc87d5 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -19,23 +19,97 @@ package org.elasticsearch.bootstrap; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import joptsimple.util.KeyValuePair; +import org.elasticsearch.Build; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserError; +import org.elasticsearch.monitor.jvm.JvmInfo; + import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; /** * This class starts elasticsearch. 
*/ -public final class Elasticsearch { +class Elasticsearch extends Command { - /** no instantiation */ - private Elasticsearch() {} + private final OptionSpec versionOption; + private final OptionSpec daemonizeOption; + private final OptionSpec pidfileOption; + private final OptionSpec propertyOption; + + // visible for testing + Elasticsearch() { + super("starts elasticsearch"); + // TODO: in jopt-simple 5.0, make this mutually exclusive with all other options + versionOption = parser.acceptsAll(Arrays.asList("V", "version"), + "Prints elasticsearch version information and exits"); + daemonizeOption = parser.acceptsAll(Arrays.asList("d", "daemonize"), + "Starts Elasticsearch in the background"); + // TODO: in jopt-simple 5.0 this option type can be a Path + pidfileOption = parser.acceptsAll(Arrays.asList("p", "pidfile"), + "Creates a pid file in the specified path on start") + .withRequiredArg(); + propertyOption = parser.accepts("E", "Configure an Elasticsearch setting").withRequiredArg().ofType(KeyValuePair.class); + } /** * Main entry point for starting elasticsearch */ - public static void main(String[] args) throws StartupError { + public static void main(final String[] args) throws Exception { + final Elasticsearch elasticsearch = new Elasticsearch(); + int status = main(args, elasticsearch, Terminal.DEFAULT); + if (status != ExitCodes.OK) { + exit(status); + } + } + + static int main(final String[] args, final Elasticsearch elasticsearch, final Terminal terminal) throws Exception { + return elasticsearch.main(args, terminal); + } + + @Override + protected void execute(Terminal terminal, OptionSet options) throws Exception { + if (options.nonOptionArguments().isEmpty() == false) { + throw new UserError(ExitCodes.USAGE, "Positional arguments not allowed, found " + options.nonOptionArguments()); + } + if (options.has(versionOption)) { + if (options.has(daemonizeOption) || options.has(pidfileOption)) { + throw new UserError(ExitCodes.USAGE, "Elasticsearch version option is mutually exclusive with any other option"); + } + terminal.println("Version: " + org.elasticsearch.Version.CURRENT + + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date() + + ", JVM: " + JvmInfo.jvmInfo().version()); + return; + } + + final boolean daemonize = options.has(daemonizeOption); + final String pidFile = pidfileOption.value(options); + + final Map esSettings = new HashMap<>(); + for (final KeyValuePair kvp : propertyOption.values(options)) { + if (!kvp.key.startsWith("es.")) { + throw new UserError(ExitCodes.USAGE, "Elasticsearch settings must be prefixed with [es.] but was [" + kvp.key + "]"); + } + if (kvp.value.isEmpty()) { + throw new UserError(ExitCodes.USAGE, "Elasticsearch setting [" + kvp.key + "] must not be empty"); + } + esSettings.put(kvp.key, kvp.value); + } + + init(daemonize, pidFile, esSettings); + } + + void init(final boolean daemonize, final String pidFile, final Map esSettings) { try { - Bootstrap.init(args); - } catch (Throwable t) { + Bootstrap.init(!daemonize, pidFile, esSettings); + } catch (final Throwable t) { // format exceptions to the console in a special way // to avoid 2MB stacktraces from guice, etc. 
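// Editor's note: the option wiring above is jopt-simple, replacing the deleted
// commons-cli based BootstrapCLIParser. A compact standalone sketch of the same
// pattern (option names and values here are illustrative):
//   OptionParser parser = new OptionParser();
//   OptionSpec<Void> daemonize = parser.acceptsAll(Arrays.asList("d", "daemonize"), "run in background");
//   OptionSpec<KeyValuePair> setting = parser.accepts("E", "a setting").withRequiredArg().ofType(KeyValuePair.class);
//   OptionSet options = parser.parse("-d", "-Ees.path.home=/tmp");
//   options.has(daemonize);        // true
//   setting.value(options).key;    // "es.path.home"
//   setting.value(options).value;  // "/tmp"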
throw new StartupError(t); diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNACLibrary.java b/core/src/main/java/org/elasticsearch/bootstrap/JNACLibrary.java index 573f3d5be3e..5d1369b21f7 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JNACLibrary.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JNACLibrary.java @@ -39,6 +39,7 @@ final class JNACLibrary { public static final int MCL_CURRENT = 1; public static final int ENOMEM = 12; public static final int RLIMIT_MEMLOCK = Constants.MAC_OS_X ? 6 : 8; + public static final int RLIMIT_AS = Constants.MAC_OS_X ? 5 : 9; public static final long RLIM_INFINITY = Constants.MAC_OS_X ? 9223372036854775807L : -1L; static { diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java b/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java index b9d5ce11dbc..e55d38a0f72 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java @@ -52,6 +52,8 @@ class JNANatives { // the user ID that owns the running Elasticsearch process static long MAX_NUMBER_OF_THREADS = -1; + static long MAX_SIZE_VIRTUAL_MEMORY = Long.MIN_VALUE; + static void tryMlockall() { int errno = Integer.MIN_VALUE; String errMsg = null; @@ -76,7 +78,7 @@ class JNANatives { softLimit = rlimit.rlim_cur.longValue(); hardLimit = rlimit.rlim_max.longValue(); } else { - logger.warn("Unable to retrieve resource limits: " + JNACLibrary.strerror(Native.getLastError())); + logger.warn("Unable to retrieve resource limits: {}", JNACLibrary.strerror(Native.getLastError())); } } } catch (UnsatisfiedLinkError e) { @@ -85,19 +87,20 @@ class JNANatives { } // mlockall failed for some reason - logger.warn("Unable to lock JVM Memory: error=" + errno + ",reason=" + errMsg); + logger.warn("Unable to lock JVM Memory: error={}, reason={}", errno , errMsg); logger.warn("This can result in part of the JVM being swapped out."); if (errno == JNACLibrary.ENOMEM) { if (rlimitSuccess) { - logger.warn("Increase RLIMIT_MEMLOCK, soft limit: " + rlimitToString(softLimit) + ", hard limit: " + rlimitToString(hardLimit)); + logger.warn("Increase RLIMIT_MEMLOCK, soft limit: {}, hard limit: {}", rlimitToString(softLimit), rlimitToString(hardLimit)); if (Constants.LINUX) { // give specific instructions for the linux case to make it easy String user = System.getProperty("user.name"); logger.warn("These can be adjusted by modifying /etc/security/limits.conf, for example: \n" + - "\t# allow user '" + user + "' mlockall\n" + - "\t" + user + " soft memlock unlimited\n" + - "\t" + user + " hard memlock unlimited" - ); + "\t# allow user '{}' mlockall\n" + + "\t{} soft memlock unlimited\n" + + "\t{} hard memlock unlimited", + user, user, user + ); logger.warn("If you are logged in interactively, you will have to re-login for the new limits to take effect."); } } else { @@ -123,6 +126,17 @@ class JNANatives { } } + static void trySetMaxSizeVirtualMemory() { + if (Constants.LINUX || Constants.MAC_OS_X) { + final JNACLibrary.Rlimit rlimit = new JNACLibrary.Rlimit(); + if (JNACLibrary.getrlimit(JNACLibrary.RLIMIT_AS, rlimit) == 0) { + MAX_SIZE_VIRTUAL_MEMORY = rlimit.rlim_cur.longValue(); + } else { + logger.warn("unable to retrieve max size virtual memory [" + JNACLibrary.strerror(Native.getLastError()) + "]"); + } + } + } + static String rlimitToString(long value) { assert Constants.LINUX || Constants.MAC_OS_X; if (value == JNACLibrary.RLIM_INFINITY) { @@ -155,7 +169,7 @@ class JNANatives { // the 
amount of memory we wish to lock, plus a small overhead (1MB). SizeT size = new SizeT(JvmInfo.jvmInfo().getMem().getHeapInit().getBytes() + (1024 * 1024)); if (!kernel.SetProcessWorkingSetSize(process, size, size)) { - logger.warn("Unable to lock JVM memory. Failed to set working set size. Error code " + Native.getLastError()); + logger.warn("Unable to lock JVM memory. Failed to set working set size. Error code {}", Native.getLastError()); } else { JNAKernel32Library.MemoryBasicInformation memInfo = new JNAKernel32Library.MemoryBasicInformation(); long address = 0; @@ -188,7 +202,7 @@ class JNANatives { if (result) { logger.debug("console ctrl handler correctly set"); } else { - logger.warn("unknown error " + Native.getLastError() + " when adding console ctrl handler:"); + logger.warn("unknown error {} when adding console ctrl handler", Native.getLastError()); } } catch (UnsatisfiedLinkError e) { // this will have already been logged by Kernel32Library, no need to repeat it diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JVMCheck.java b/core/src/main/java/org/elasticsearch/bootstrap/JVMCheck.java index 3f81cd035bd..86629e4fa36 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JVMCheck.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JVMCheck.java @@ -200,7 +200,7 @@ final class JVMCheck { HotSpotCheck bug = JVM_BROKEN_HOTSPOT_VERSIONS.get(Constants.JVM_VERSION); if (bug != null && bug.check()) { if (bug.getWorkaround().isPresent() && ManagementFactory.getRuntimeMXBean().getInputArguments().contains(bug.getWorkaround().get())) { - Loggers.getLogger(JVMCheck.class).warn(bug.getWarningMessage().get()); + Loggers.getLogger(JVMCheck.class).warn("{}", bug.getWarningMessage().get()); } else { throw new RuntimeException(bug.getErrorMessage()); } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java b/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java index 4325c5b7aef..46908e60642 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Seccomp.java @@ -394,7 +394,7 @@ final class Seccomp { method = 0; int errno1 = Native.getLastError(); if (logger.isDebugEnabled()) { - logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): " + JNACLibrary.strerror(errno1) + ", falling back to prctl(PR_SET_SECCOMP)..."); + logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): {}, falling back to prctl(PR_SET_SECCOMP)...", JNACLibrary.strerror(errno1)); } if (linux_prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, pointer, 0, 0) != 0) { int errno2 = Native.getLastError(); diff --git a/core/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java b/core/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java index a293428192b..2fad8678649 100644 --- a/core/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java +++ b/core/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java @@ -19,13 +19,13 @@ package org.elasticsearch.cache.recycler; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.recycler.AbstractRecyclerC; import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; 
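// Editor's note: this file is one instance of the mechanical settings migration
// running through this diff, from positional dynamic/scope arguments to Property
// varargs. Shape of the change, with an illustrative setting name:
//   before: Setting.boolSetting("example.flag", false, false, Setting.Scope.CLUSTER)
//   after:  Setting.boolSetting("example.flag", false, Property.NodeScope)
// A setting that was dynamic (the old boolean true) passes Property.Dynamic as well.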
import org.elasticsearch.common.util.BigArrays; @@ -43,13 +43,19 @@ import static org.elasticsearch.common.recycler.Recyclers.none; /** A recycler of fixed-size pages. */ public class PageCacheRecycler extends AbstractComponent implements Releasable { - public static final Setting TYPE_SETTING = new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, false, Setting.Scope.CLUSTER); - public static final Setting LIMIT_HEAP_SETTING = Setting.byteSizeSetting("cache.recycler.page.limit.heap", "10%", false, Setting.Scope.CLUSTER); - public static final Setting WEIGHT_BYTES_SETTING = Setting.doubleSetting("cache.recycler.page.weight.bytes", 1d, 0d, false, Setting.Scope.CLUSTER); - public static final Setting WEIGHT_LONG_SETTING = Setting.doubleSetting("cache.recycler.page.weight.longs", 1d, 0d, false, Setting.Scope.CLUSTER); - public static final Setting WEIGHT_INT_SETTING = Setting.doubleSetting("cache.recycler.page.weight.ints", 1d, 0d, false, Setting.Scope.CLUSTER); + public static final Setting TYPE_SETTING = + new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, Property.NodeScope); + public static final Setting LIMIT_HEAP_SETTING = + Setting.byteSizeSetting("cache.recycler.page.limit.heap", "10%", Property.NodeScope); + public static final Setting WEIGHT_BYTES_SETTING = + Setting.doubleSetting("cache.recycler.page.weight.bytes", 1d, 0d, Property.NodeScope); + public static final Setting WEIGHT_LONG_SETTING = + Setting.doubleSetting("cache.recycler.page.weight.longs", 1d, 0d, Property.NodeScope); + public static final Setting WEIGHT_INT_SETTING = + Setting.doubleSetting("cache.recycler.page.weight.ints", 1d, 0d, Property.NodeScope); // object pages are less useful to us so we give them a lower weight by default - public static final Setting WEIGHT_OBJECTS_SETTING = Setting.doubleSetting("cache.recycler.page.weight.objects", 0.1d, 0d, false, Setting.Scope.CLUSTER); + public static final Setting WEIGHT_OBJECTS_SETTING = + Setting.doubleSetting("cache.recycler.page.weight.objects", 0.1d, 0d, Property.NodeScope); private final Recycler bytePage; private final Recycler intPage; diff --git a/core/src/main/java/org/elasticsearch/cli/Command.java b/core/src/main/java/org/elasticsearch/cli/Command.java new file mode 100644 index 00000000000..9e6afdd6638 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cli/Command.java @@ -0,0 +1,112 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cli; + +import java.io.IOException; +import java.util.Arrays; + +import joptsimple.OptionException; +import joptsimple.OptionParser; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.elasticsearch.common.SuppressForbidden; + +/** + * An action to execute within a cli. 
+ */ +public abstract class Command { + + /** A description of the command, used in the help output. */ + protected final String description; + + /** The option parser for this command. */ + protected final OptionParser parser = new OptionParser(); + + private final OptionSpec helpOption = parser.acceptsAll(Arrays.asList("h", "help"), "show help").forHelp(); + private final OptionSpec silentOption = parser.acceptsAll(Arrays.asList("s", "silent"), "show minimal output"); + private final OptionSpec verboseOption = parser.acceptsAll(Arrays.asList("v", "verbose"), "show verbose output"); + + public Command(String description) { + this.description = description; + } + + /** Parses options for this command from args and executes it. */ + public final int main(String[] args, Terminal terminal) throws Exception { + try { + mainWithoutErrorHandling(args, terminal); + } catch (OptionException e) { + printHelp(terminal); + terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage()); + return ExitCodes.USAGE; + } catch (UserError e) { + terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage()); + return e.exitCode; + } + return ExitCodes.OK; + } + + /** + * Executes the command, but all errors are thrown. + */ + void mainWithoutErrorHandling(String[] args, Terminal terminal) throws Exception { + final OptionSet options = parser.parse(args); + + if (options.has(helpOption)) { + printHelp(terminal); + return; + } + + if (options.has(silentOption)) { + if (options.has(verboseOption)) { + // mutually exclusive, we can remove this with jopt-simple 5.0, which natively supports it + throw new UserError(ExitCodes.USAGE, "Cannot specify -s and -v together"); + } + terminal.setVerbosity(Terminal.Verbosity.SILENT); + } else if (options.has(verboseOption)) { + terminal.setVerbosity(Terminal.Verbosity.VERBOSE); + } else { + terminal.setVerbosity(Terminal.Verbosity.NORMAL); + } + + execute(terminal, options); + } + + /** Prints a help message for the command to the terminal. */ + private void printHelp(Terminal terminal) throws IOException { + terminal.println(description); + terminal.println(""); + printAdditionalHelp(terminal); + parser.printHelpOn(terminal.getWriter()); + } + + /** Prints additional help information, specific to the command */ + protected void printAdditionalHelp(Terminal terminal) {} + + @SuppressForbidden(reason = "Allowed to exit explicitly from #main()") + protected static void exit(int status) { + System.exit(status); + } + + /** + * Executes this command. + * + * Any runtime user errors (like an input file that does not exist) should throw a {@link UserError}. */ + protected abstract void execute(Terminal terminal, OptionSet options) throws Exception; +} diff --git a/core/src/main/java/org/elasticsearch/cli/ExitCodes.java b/core/src/main/java/org/elasticsearch/cli/ExitCodes.java new file mode 100644 index 00000000000..d08deb8b1ad --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cli/ExitCodes.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cli; + +/** + * POSIX exit codes. + */ +public class ExitCodes { + public static final int OK = 0; + public static final int USAGE = 64; /* command line usage error */ + public static final int DATA_ERROR = 65; /* data format error */ + public static final int NO_INPUT = 66; /* cannot open input */ + public static final int NO_USER = 67; /* addressee unknown */ + public static final int NO_HOST = 68; /* host name unknown */ + public static final int UNAVAILABLE = 69; /* service unavailable */ + public static final int CODE_ERROR = 70; /* internal software error */ + public static final int CANT_CREATE = 73; /* can't create (user) output file */ + public static final int IO_ERROR = 74; /* input/output error */ + public static final int TEMP_FAILURE = 75; /* temp failure; user is invited to retry */ + public static final int PROTOCOL = 76; /* remote error in protocol */ + public static final int NOPERM = 77; /* permission denied */ + public static final int CONFIG = 78; /* configuration error */ + + private ExitCodes() { /* no instance, just constants */ } +} diff --git a/core/src/main/java/org/elasticsearch/cli/MultiCommand.java b/core/src/main/java/org/elasticsearch/cli/MultiCommand.java new file mode 100644 index 00000000000..a9feee0c9bf --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cli/MultiCommand.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cli; + +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.Map; + +import joptsimple.NonOptionArgumentSpec; +import joptsimple.OptionSet; + +/** + * A cli tool which is made up of multiple subcommands. 
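To make the subcommand dispatch below concrete, a hedged usage sketch for MultiCommand; ExampleTool, ListCommand, and InstallCommand are hypothetical, not part of this change:

    // Hypothetical tool composed of two subcommands; the first non-option
    // argument picks the subcommand and the remaining arguments are forwarded.
    class ExampleTool extends MultiCommand {
        ExampleTool() {
            super("manages example things");
            subcommands.put("list", new ListCommand());
            subcommands.put("install", new InstallCommand());
        }
    }
    // new ExampleTool().main(new String[] {"list", "-v"}, Terminal.DEFAULT)
    // routes to ListCommand.mainWithoutErrorHandling with ["-v"].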
+ */ +public class MultiCommand extends Command { + + protected final Map subcommands = new LinkedHashMap<>(); + + private final NonOptionArgumentSpec arguments = parser.nonOptions("command"); + + public MultiCommand(String description) { + super(description); + parser.posixlyCorrect(true); + } + + @Override + protected void printAdditionalHelp(Terminal terminal) { + if (subcommands.isEmpty()) { + throw new IllegalStateException("No subcommands configured"); + } + terminal.println("Commands"); + terminal.println("--------"); + for (Map.Entry subcommand : subcommands.entrySet()) { + terminal.println(subcommand.getKey() + " - " + subcommand.getValue().description); + } + terminal.println(""); + } + + @Override + protected void execute(Terminal terminal, OptionSet options) throws Exception { + if (subcommands.isEmpty()) { + throw new IllegalStateException("No subcommands configured"); + } + String[] args = arguments.values(options).toArray(new String[0]); + if (args.length == 0) { + throw new UserError(ExitCodes.USAGE, "Missing command"); + } + Command subcommand = subcommands.get(args[0]); + if (subcommand == null) { + throw new UserError(ExitCodes.USAGE, "Unknown command [" + args[0] + "]"); + } + subcommand.mainWithoutErrorHandling(Arrays.copyOfRange(args, 1, args.length), terminal); + } +} diff --git a/core/src/main/java/org/elasticsearch/common/cli/Terminal.java b/core/src/main/java/org/elasticsearch/cli/Terminal.java similarity index 91% rename from core/src/main/java/org/elasticsearch/common/cli/Terminal.java rename to core/src/main/java/org/elasticsearch/cli/Terminal.java index fbef1f78cc3..d2dc57263dc 100644 --- a/core/src/main/java/org/elasticsearch/common/cli/Terminal.java +++ b/core/src/main/java/org/elasticsearch/cli/Terminal.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.common.cli; +package org.elasticsearch.cli; import java.io.BufferedReader; import java.io.Console; @@ -29,7 +29,7 @@ import java.nio.charset.Charset; import org.elasticsearch.common.SuppressForbidden; /** - * A Terminal wraps access to reading input and writing output for a {@link CliTool}. + * A Terminal wraps access to reading input and writing output for a cli. * * The available methods are similar to those of {@link Console}, with the ability * to read either normal text or a password, and the ability to print a line @@ -61,7 +61,7 @@ public abstract class Terminal { } /** Sets the verbosity of the terminal. 
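Since setVerbosity is now public, command implementations can gate their output on the level the user chose with -s/-v; a small sketch using only methods visible in this diff:

    // SILENT messages always reach the user; VERBOSE ones only after -v.
    Terminal terminal = Terminal.DEFAULT;
    terminal.setVerbosity(Terminal.Verbosity.VERBOSE);
    terminal.println(Terminal.Verbosity.VERBOSE, "resolving plugins...");   // shown with -v
    terminal.println(Terminal.Verbosity.SILENT, "ERROR: cannot bind port"); // always shown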
*/ - void setVerbosity(Verbosity verbosity) { + public void setVerbosity(Verbosity verbosity) { this.verbosity = verbosity; } @@ -89,35 +89,35 @@ public abstract class Terminal { private static class ConsoleTerminal extends Terminal { - private static final Console console = System.console(); + private static final Console CONSOLE = System.console(); ConsoleTerminal() { super(System.lineSeparator()); } static boolean isSupported() { - return console != null; + return CONSOLE != null; } @Override public PrintWriter getWriter() { - return console.writer(); + return CONSOLE.writer(); } @Override public String readText(String prompt) { - return console.readLine("%s", prompt); + return CONSOLE.readLine("%s", prompt); } @Override public char[] readSecret(String prompt) { - return console.readPassword("%s", prompt); + return CONSOLE.readPassword("%s", prompt); } } private static class SystemTerminal extends Terminal { - private final PrintWriter writer = newWriter(); + private static final PrintWriter WRITER = newWriter(); SystemTerminal() { super(System.lineSeparator()); @@ -130,7 +130,7 @@ public abstract class Terminal { @Override public PrintWriter getWriter() { - return writer; + return WRITER; } @Override diff --git a/core/src/main/java/org/elasticsearch/common/cli/UserError.java b/core/src/main/java/org/elasticsearch/cli/UserError.java similarity index 79% rename from core/src/main/java/org/elasticsearch/common/cli/UserError.java rename to core/src/main/java/org/elasticsearch/cli/UserError.java index ad709830885..2a4f2bf1233 100644 --- a/core/src/main/java/org/elasticsearch/common/cli/UserError.java +++ b/core/src/main/java/org/elasticsearch/cli/UserError.java @@ -17,19 +17,19 @@ * under the License. */ -package org.elasticsearch.common.cli; +package org.elasticsearch.cli; /** - * An exception representing a user fixable problem in {@link CliTool} usage. + * An exception representing a user fixable problem in {@link Command} usage. */ public class UserError extends Exception { /** The exit status the cli should use when catching this user error. */ - public final CliTool.ExitStatus exitStatus; + public final int exitCode; /** Constructs a UserError with an exit status and message to show the user.
*/ - public UserError(CliTool.ExitStatus exitStatus, String msg) { + public UserError(int exitCode, String msg) { super(msg); - this.exitStatus = exitStatus; + this.exitCode = exitCode; } } diff --git a/core/src/main/java/org/elasticsearch/client/Client.java b/core/src/main/java/org/elasticsearch/client/Client.java index f81ba9eb1b1..1c7676bc91c 100644 --- a/core/src/main/java/org/elasticsearch/client/Client.java +++ b/core/src/main/java/org/elasticsearch/client/Client.java @@ -19,12 +19,8 @@ package org.elasticsearch.client; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; @@ -72,9 +68,6 @@ import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.SearchScrollRequestBuilder; -import org.elasticsearch.action.suggest.SuggestRequest; -import org.elasticsearch.action.suggest.SuggestRequestBuilder; -import org.elasticsearch.action.suggest.SuggestResponse; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.action.termvectors.MultiTermVectorsRequestBuilder; import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; @@ -87,6 +80,7 @@ import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.util.Map; @@ -114,7 +108,7 @@ public interface Client extends ElasticsearchClient, Releasable { default: throw new IllegalArgumentException("Can't parse [client.type] must be one of [node, transport]"); } - }, false, Setting.Scope.CLUSTER); + }, Property.NodeScope); /** * The admin client that can be used to perform administrative operations. @@ -370,29 +364,6 @@ public interface Client extends ElasticsearchClient, Releasable { */ MultiGetRequestBuilder prepareMultiGet(); - /** - * Suggestion matching a specific phrase. - * - * @param request The suggest request - * @return The result future - * @see Requests#suggestRequest(String...) - */ - ActionFuture suggest(SuggestRequest request); - - /** - * Suggestions matching a specific phrase. - * - * @param request The suggest request - * @param listener A listener to be notified of the result - * @see Requests#suggestRequest(String...) - */ - void suggest(SuggestRequest request, ActionListener listener); - - /** - * Suggestions matching a specific phrase. - */ - SuggestRequestBuilder prepareSuggest(String... indices); - /** * Search across one or more indices and one or more types with a query. 
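With SuggestRequest, SuggestResponse, and prepareSuggest removed from the Client interface here, standalone suggest requests are evidently folded into the search API. A hedged sketch of the replacement call path (the builder names follow the suggest-in-search API of this codebase; exact signatures may differ at this commit):

    // Hypothetical replacement for the removed client.prepareSuggest("idx"):
    // attach the suggestion to an ordinary search request instead.
    SearchResponse response = client.prepareSearch("idx")
            .suggest(new SuggestBuilder()
                    .addSuggestion("spelling", SuggestBuilders.termSuggestion("body").text("serach")))
            .get();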
* diff --git a/core/src/main/java/org/elasticsearch/client/Requests.java b/core/src/main/java/org/elasticsearch/client/Requests.java index 3cf4f3dc6cb..276bd9d9062 100644 --- a/core/src/main/java/org/elasticsearch/client/Requests.java +++ b/core/src/main/java/org/elasticsearch/client/Requests.java @@ -60,7 +60,6 @@ import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchScrollRequest; -import org.elasticsearch.action.suggest.SuggestRequest; import org.elasticsearch.common.xcontent.XContentType; /** @@ -125,16 +124,6 @@ public class Requests { return new GetRequest(index); } - /** - * Creates a suggest request for getting suggestions from provided indices. - * The suggest query has to be set using the JSON source using {@link org.elasticsearch.action.suggest.SuggestRequest#suggest(org.elasticsearch.common.bytes.BytesReference)}. - * @param indices The indices to suggest from. Use null or _all to execute against all indices - * @see org.elasticsearch.client.Client#suggest(org.elasticsearch.action.suggest.SuggestRequest) - */ - public static SuggestRequest suggestRequest(String... indices) { - return new SuggestRequest(indices); - } - /** * Creates a search request against one or more indices. Note, the search source must be set either using the * actual JSON search source, or the {@link org.elasticsearch.search.builder.SearchSourceBuilder}. @@ -342,7 +331,8 @@ public class Requests { /** * Creates a cluster health request. * - * @param indices The indices to provide additional cluster health information for. Use null or _all to execute against all indices + * @param indices The indices to provide additional cluster health information for. 
+ * Use null or _all to execute against all indices * @return The cluster health request * @see org.elasticsearch.client.ClusterAdminClient#health(org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest) */ diff --git a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java index f729d5287df..0044890ee35 100644 --- a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -314,10 +314,6 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollAction; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.SearchScrollRequestBuilder; -import org.elasticsearch.action.suggest.SuggestAction; -import org.elasticsearch.action.suggest.SuggestRequest; -import org.elasticsearch.action.suggest.SuggestRequestBuilder; -import org.elasticsearch.action.suggest.SuggestResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.action.termvectors.MultiTermVectorsAction; @@ -660,21 +656,6 @@ public abstract class AbstractClient extends AbstractComponent implements Client return new MultiSearchRequestBuilder(this, MultiSearchAction.INSTANCE); } - @Override - public ActionFuture suggest(final SuggestRequest request) { - return execute(SuggestAction.INSTANCE, request); - } - - @Override - public void suggest(final SuggestRequest request, final ActionListener listener) { - execute(SuggestAction.INSTANCE, request, listener); - } - - @Override - public SuggestRequestBuilder prepareSuggest(String... 
indices) { - return new SuggestRequestBuilder(this, SuggestAction.INSTANCE).setIndices(indices); - } - @Override public ActionFuture termVectors(final TermVectorsRequest request) { return execute(TermVectorsAction.INSTANCE, request); diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java index 1e605b9de06..abbc5823b2a 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java @@ -34,6 +34,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; @@ -100,10 +101,14 @@ public class TransportClientNodesService extends AbstractComponent { private volatile boolean closed; - public static final Setting CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL = Setting.positiveTimeSetting("client.transport.nodes_sampler_interval", timeValueSeconds(5), false, Setting.Scope.CLUSTER); - public static final Setting CLIENT_TRANSPORT_PING_TIMEOUT = Setting.positiveTimeSetting("client.transport.ping_timeout", timeValueSeconds(5), false, Setting.Scope.CLUSTER); - public static final Setting CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME = Setting.boolSetting("client.transport.ignore_cluster_name", false, false, Setting.Scope.CLUSTER); - public static final Setting CLIENT_TRANSPORT_SNIFF = Setting.boolSetting("client.transport.sniff", false, false, Setting.Scope.CLUSTER); + public static final Setting CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL = + Setting.positiveTimeSetting("client.transport.nodes_sampler_interval", timeValueSeconds(5), Property.NodeScope); + public static final Setting CLIENT_TRANSPORT_PING_TIMEOUT = + Setting.positiveTimeSetting("client.transport.ping_timeout", timeValueSeconds(5), Property.NodeScope); + public static final Setting CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME = + Setting.boolSetting("client.transport.ignore_cluster_name", false, Property.NodeScope); + public static final Setting CLIENT_TRANSPORT_SNIFF = + Setting.boolSetting("client.transport.sniff", false, Property.NodeScope); @Inject public TransportClientNodesService(Settings settings, ClusterName clusterName, TransportService transportService, @@ -119,7 +124,7 @@ public class TransportClientNodesService extends AbstractComponent { this.ignoreClusterName = CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME.get(this.settings); if (logger.isDebugEnabled()) { - logger.debug("node_sampler_interval[" + nodesSamplerInterval + "]"); + logger.debug("node_sampler_interval[{}]", nodesSamplerInterval); } if (CLIENT_TRANSPORT_SNIFF.get(this.settings)) { @@ -318,7 +323,7 @@ public class TransportClientNodesService extends AbstractComponent { transportService.connectToNode(node); } catch (Throwable e) { it.remove(); - logger.debug("failed to connect to discovered node [" + node + "]", e); + logger.debug("failed to connect to discovered node [{}]", e, node); } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java index e851b7814da..98853e8447f 
100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java @@ -23,6 +23,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.index.Index; import java.util.ArrayList; import java.util.Collections; @@ -120,7 +121,7 @@ public class ClusterChangedEvent { /** * Returns the indices deleted in this event */ - public List indicesDeleted() { + public List indicesDeleted() { // If the new cluster state has a new cluster UUID, the likely scenario is that a node was elected // master that has had its data directory wiped out, in which case we don't want to delete the indices and lose data; // rather we want to import them as dangling indices instead. So we check here if the cluster UUID differs from the previous @@ -131,17 +132,18 @@ public class ClusterChangedEvent { if (metaDataChanged() == false || isNewCluster()) { return Collections.emptyList(); } - List deleted = null; - for (ObjectCursor cursor : previousState.metaData().indices().keys()) { - String index = cursor.value; - if (!state.metaData().hasIndex(index)) { + List deleted = null; + for (ObjectCursor cursor : previousState.metaData().indices().values()) { + IndexMetaData index = cursor.value; + IndexMetaData current = state.metaData().index(index.getIndex()); + if (current == null) { if (deleted == null) { deleted = new ArrayList<>(); } - deleted.add(index); + deleted.add(index.getIndex()); } } - return deleted == null ? Collections.emptyList() : deleted; + return deleted == null ? Collections.emptyList() : deleted; } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 3e668191ff3..47dd2ce9ae6 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -53,11 +53,12 @@ import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationD import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; -import org.elasticsearch.cluster.service.InternalClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.ExtensionPoint; import org.elasticsearch.gateway.GatewayAllocator; @@ -74,7 +75,8 @@ public class ClusterModule extends AbstractModule { public static final String EVEN_SHARD_COUNT_ALLOCATOR = "even_shard"; public static final String BALANCED_ALLOCATOR = "balanced"; // default - public static final Setting SHARDS_ALLOCATOR_TYPE_SETTING = new Setting<>("cluster.routing.allocation.type", BALANCED_ALLOCATOR, Function.identity(), false, Setting.Scope.CLUSTER); + public static final Setting SHARDS_ALLOCATOR_TYPE_SETTING = + new Setting<>("cluster.routing.allocation.type", 
BALANCED_ALLOCATOR, Function.identity(), Property.NodeScope); public static final List> DEFAULT_ALLOCATION_DECIDERS = Collections.unmodifiableList(Arrays.asList( SameShardAllocationDecider.class, @@ -135,7 +137,8 @@ public class ClusterModule extends AbstractModule { bind(GatewayAllocator.class).asEagerSingleton(); bind(AllocationService.class).asEagerSingleton(); bind(DiscoveryNodeService.class).asEagerSingleton(); - bind(ClusterService.class).to(InternalClusterService.class).asEagerSingleton(); + bind(ClusterService.class).asEagerSingleton(); + bind(NodeConnectionsService.class).asEagerSingleton(); bind(OperationRouting.class).asEagerSingleton(); bind(MetaDataCreateIndexService.class).asEagerSingleton(); bind(MetaDataDeleteIndexService.class).asEagerSingleton(); diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterName.java b/core/src/main/java/org/elasticsearch/cluster/ClusterName.java index daf3000d710..09c64065dbd 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterName.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterName.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.io.IOException; @@ -37,7 +38,7 @@ public class ClusterName implements Streamable { throw new IllegalArgumentException("[cluster.name] must not be empty"); } return s; - }, false, Setting.Scope.CLUSTER); + }, Property.NodeScope); public static final ClusterName DEFAULT = new ClusterName(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY).intern()); diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterService.java b/core/src/main/java/org/elasticsearch/cluster/ClusterService.java deleted file mode 100644 index 27df4b9e96f..00000000000 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterService.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.cluster; - -import org.elasticsearch.cluster.block.ClusterBlock; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.OperationRouting; -import org.elasticsearch.cluster.service.PendingClusterTask; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.LifecycleComponent; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.tasks.TaskManager; - -import java.util.List; - -/** - * The cluster service allowing to both register for cluster state events ({@link ClusterStateListener}) - * and submit state update tasks ({@link ClusterStateUpdateTask}. - */ -public interface ClusterService extends LifecycleComponent { - - /** - * The local node. - */ - DiscoveryNode localNode(); - - /** - * The current state. - */ - ClusterState state(); - - /** - * Adds an initial block to be set on the first cluster state created. - */ - void addInitialStateBlock(ClusterBlock block) throws IllegalStateException; - - /** - * Remove an initial block to be set on the first cluster state created. - */ - void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException; - - /** - * Remove an initial block to be set on the first cluster state created. - */ - void removeInitialStateBlock(int blockId) throws IllegalStateException; - - /** - * The operation routing. - */ - OperationRouting operationRouting(); - - /** - * Adds a priority listener for updated cluster states. - */ - void addFirst(ClusterStateListener listener); - - /** - * Adds last listener. - */ - void addLast(ClusterStateListener listener); - - /** - * Adds a listener for updated cluster states. - */ - void add(ClusterStateListener listener); - - /** - * Removes a listener for updated cluster states. - */ - void remove(ClusterStateListener listener); - - /** - * Add a listener for on/off local node master events - */ - void add(LocalNodeMasterListener listener); - - /** - * Remove the given listener for on/off local master events - */ - void remove(LocalNodeMasterListener listener); - - /** - * Adds a cluster state listener that will timeout after the provided timeout, - * and is executed after the clusterstate has been successfully applied ie. is - * in state {@link org.elasticsearch.cluster.ClusterState.ClusterStateStatus#APPLIED} - * NOTE: a {@code null} timeout means that the listener will never be removed - * automatically - */ - void add(@Nullable TimeValue timeout, TimeoutClusterStateListener listener); - - /** - * Submits a cluster state update task; submitted updates will be - * batched across the same instance of executor. 
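To illustrate the batching contract described in this javadoc (the interface itself is deleted in this diff, but the concrete ClusterService bound in ClusterModule keeps the same submission methods), a hedged usage sketch; the task object, executor, and listener are hypothetical:

    // Tasks submitted against the same executor instance may be folded into a
    // single cluster state update on the master.
    clusterService.submitStateUpdateTask(
            "example-source",                              // describes who asked for the update
            task,                                          // hypothetical per-task state
            ClusterStateTaskConfig.build(Priority.NORMAL), // priority of the update
            executor,                                      // shared ClusterStateTaskExecutor
            listener);                                     // completion callback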
The exact batching - * semantics depend on the underlying implementation but a rough - * guideline is that if the update task is submitted while there - * are pending update tasks for the same executor, these update - * tasks will all be executed on the executor in a single batch - * - * @param source the source of the cluster state update task - * @param task the state needed for the cluster state update task - * @param config the cluster state update task configuration - * @param executor the cluster state update task executor; tasks - * that share the same executor will be executed - * batches on this executor - * @param listener callback after the cluster state update task - * completes - * @param the type of the cluster state update task state - */ - void submitStateUpdateTask(final String source, final T task, - final ClusterStateTaskConfig config, - final ClusterStateTaskExecutor executor, - final ClusterStateTaskListener listener); - - /** - * Submits a cluster state update task; unlike {@link #submitStateUpdateTask(String, Object, ClusterStateTaskConfig, ClusterStateTaskExecutor, ClusterStateTaskListener)}, - * submitted updates will not be batched. - * - * @param source the source of the cluster state update task - * @param updateTask the full context for the cluster state update - * task - */ - void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask); - - /** - * Returns the tasks that are pending. - */ - List pendingTasks(); - - /** - * Returns the number of currently pending tasks. - */ - int numberOfPendingTasks(); - - /** - * Returns the maximum wait time for tasks in the queue - * - * @return A zero time value if the queue is empty, otherwise the time value oldest task waiting in the queue - */ - TimeValue getMaxTaskWaitTime(); - - /** - * Returns task manager created in the cluster service - */ - TaskManager getTaskManager(); -} diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java index 1b3ddcfebf9..1ac379555ab 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -37,7 +37,7 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.service.InternalClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -63,18 +63,18 @@ import java.util.Set; /** * Represents the current state of the cluster. - * + *

* The cluster state object is immutable with the * exception of the {@link RoutingNodes} structure, which is built on demand from the {@link RoutingTable}, * and the cluster state {@link #status}, which is updated during cluster state publishing and applying * processing. The cluster state can be updated only on the master node. All updates are performed on a - single thread and controlled by the {@link InternalClusterService}. After every update the + single thread and controlled by the {@link ClusterService}. After every update the * {@link Discovery#publish} method publishes a new version of the cluster state to all other nodes in the * cluster. The actual publishing mechanism is delegated to the {@link Discovery#publish} method and depends on * the type of discovery. For example, for local discovery it is implemented by the {@link LocalDiscovery#publish} * method. In Zen Discovery it is handled by the {@link PublishClusterStateAction#publish} method. The * publishing mechanism can be overridden by other discovery implementations. - * +

* The cluster state implements the {@link Diffable} interface in order to support publishing of cluster state * differences instead of the entire state on each change. The publishing mechanism should only send differences * to a node if this node was present in the previous version of the cluster state. If a node is not present was @@ -135,7 +135,7 @@ public class ClusterState implements ToXContent, Diffable { public static T lookupPrototypeSafe(String type) { @SuppressWarnings("unchecked") - T proto = (T)customPrototypes.get(type); + T proto = (T) customPrototypes.get(type); if (proto == null) { throw new IllegalArgumentException("No custom state prototype registered for type [" + type + "], node likely missing plugins"); } @@ -281,6 +281,16 @@ public class ClusterState implements ToXContent, Diffable { sb.append("state uuid: ").append(stateUUID).append("\n"); sb.append("from_diff: ").append(wasReadFromDiff).append("\n"); sb.append("meta data version: ").append(metaData.version()).append("\n"); + for (IndexMetaData indexMetaData : metaData) { + final String TAB = " "; + sb.append(TAB).append(indexMetaData.getIndex()); + sb.append(": v[").append(indexMetaData.getVersion()).append("]\n"); + for (int shard = 0; shard < indexMetaData.getNumberOfShards(); shard++) { + sb.append(TAB).append(TAB).append(shard).append(": "); + sb.append("p_term [").append(indexMetaData.primaryTerm(shard)).append("], "); + sb.append("a_ids ").append(indexMetaData.activeAllocationIds(shard)).append("\n"); + } + } sb.append(blocks().prettyPrint()); sb.append(nodes().prettyPrint()); sb.append(routingTable().prettyPrint()); @@ -477,6 +487,12 @@ public class ClusterState implements ToXContent, Diffable { } builder.endArray(); + builder.startObject(IndexMetaData.KEY_PRIMARY_TERMS); + for (int shard = 0; shard < indexMetaData.getNumberOfShards(); shard++) { + builder.field(Integer.toString(shard), indexMetaData.primaryTerm(shard)); + } + builder.endObject(); + builder.startObject(IndexMetaData.KEY_ACTIVE_ALLOCATIONS); for (IntObjectCursor> cursor : indexMetaData.getActiveAllocationIds()) { builder.startArray(String.valueOf(cursor.key)); @@ -487,6 +503,7 @@ public class ClusterState implements ToXContent, Diffable { } builder.endObject(); + // index metadata builder.endObject(); } builder.endObject(); @@ -683,16 +700,16 @@ public class ClusterState implements ToXContent, Diffable { } /** - * @param data input bytes - * @param localNode used to set the local node in the cluster state. + * @param data input bytes + * @param localNode used to set the local node in the cluster state. */ public static ClusterState fromBytes(byte[] data, DiscoveryNode localNode) throws IOException { return readFrom(StreamInput.wrap(data), localNode); } /** - * @param in input stream - * @param localNode used to set the local node in the cluster state. can be null. + * @param in input stream + * @param localNode used to set the local node in the cluster state. can be null. 
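The prettyPrint addition above writes one block per index into the cluster state dump. Judging from the appended strings, a one-index, two-shard state should render roughly as follows; the index name, version, terms, and allocation ids here are invented, and the exact index rendering depends on Index#toString:

    test: v[5]
        0: p_term [2], a_ids [a1, a2]
        1: p_term [1], a_ids [a3]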
*/ public static ClusterState readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException { return PROTO.readFrom(in, localNode); @@ -791,17 +808,17 @@ public class ClusterState implements ToXContent, Diffable { metaData = proto.metaData.readDiffFrom(in); blocks = proto.blocks.readDiffFrom(in); customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), - new DiffableUtils.DiffableValueSerializer() { - @Override - public Custom read(StreamInput in, String key) throws IOException { - return lookupPrototypeSafe(key).readFrom(in); - } + new DiffableUtils.DiffableValueSerializer() { + @Override + public Custom read(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readFrom(in); + } - @Override - public Diff readDiff(StreamInput in, String key) throws IOException { - return lookupPrototypeSafe(key).readDiffFrom(in); - } - }); + @Override + public Diff readDiff(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readDiffFrom(in); + } + }); } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java index dd30a711688..d79a00dc3fe 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.unit.TimeValue; @@ -44,7 +45,7 @@ public class ClusterStateObserver { } }; - private final ClusterService clusterService; + private final ClusterService clusterService; private final ThreadContext contextHolder; volatile TimeValue timeOutValue; diff --git a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index 5107b4495ab..9a9ee06ce19 100644 --- a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -33,12 +33,14 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; @@ -64,8 +66,12 @@ import java.util.concurrent.TimeUnit; */ public class InternalClusterInfoService extends AbstractComponent implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener { - public static final Setting INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING = Setting.timeSetting("cluster.info.update.interval", 
TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10), true, Setting.Scope.CLUSTER); - public static final Setting INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING = Setting.positiveTimeSetting("cluster.info.update.timeout", TimeValue.timeValueSeconds(15), true, Setting.Scope.CLUSTER); + public static final Setting INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING = + Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10), + Property.Dynamic, Property.NodeScope); + public static final Setting INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING = + Setting.positiveTimeSetting("cluster.info.update.timeout", TimeValue.timeValueSeconds(15), + Property.Dynamic, Property.NodeScope); private volatile TimeValue updateFrequency; diff --git a/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java new file mode 100644 index 00000000000..698f9d1090c --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java @@ -0,0 +1,160 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster; + +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.common.util.concurrent.KeyedLock; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ScheduledFuture; + +import static org.elasticsearch.common.settings.Setting.Property; +import static org.elasticsearch.common.settings.Setting.positiveTimeSetting; + + +/** + * This component is responsible for connecting to nodes once they are added to the cluster state, and for disconnecting + * from them when they are removed. It also periodically checks that all connections are still open and restores them if needed. + * Note that this component is *not* responsible for removing nodes from the cluster if they disconnect or do not respond + * to pings. This is done by {@link org.elasticsearch.discovery.zen.fd.NodesFaultDetection}. Master fault detection + * is done by {@link org.elasticsearch.discovery.zen.fd.MasterFaultDetection}.
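A minimal, self-contained analogue of the check-and-reschedule loop this javadoc describes (in the class below, ConnectionChecker reschedules itself in onAfter while the component is started); this sketch uses plain java.util.concurrent rather than the Elasticsearch ThreadPool, and the interval is a made-up constant:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    class PeriodicChecker implements Runnable {
        private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        private volatile boolean started;

        @Override
        public void run() {
            try {
                // check all tracked connections here, restoring any that dropped
            } finally {
                if (started) {
                    // like ConnectionChecker#onAfter: reschedule only while started
                    scheduler.schedule(this, 10, TimeUnit.SECONDS);
                }
            }
        }

        void start() {
            started = true;
            scheduler.schedule(this, 10, TimeUnit.SECONDS);
        }

        void stop() {
            started = false;
            scheduler.shutdownNow();
        }
    }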
+ */ +public class NodeConnectionsService extends AbstractLifecycleComponent { + + public static final Setting CLUSTER_NODE_RECONNECT_INTERVAL_SETTING = + positiveTimeSetting("cluster.nodes.reconnect_interval", TimeValue.timeValueSeconds(10), Property.NodeScope); + private final ThreadPool threadPool; + private final TransportService transportService; + + // map between each node and the number of failed connection attempts. 0 means successfully connected. + // if a node doesn't appear in this map it shouldn't be monitored + private ConcurrentMap nodes = ConcurrentCollections.newConcurrentMap(); + + private final KeyedLock nodeLocks = new KeyedLock<>(); + + private final TimeValue reconnectInterval; + + private volatile ScheduledFuture backgroundFuture = null; + + @Inject + public NodeConnectionsService(Settings settings, ThreadPool threadPool, TransportService transportService) { + super(settings); + this.threadPool = threadPool; + this.transportService = transportService; + this.reconnectInterval = NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING.get(settings); + } + + public void connectToAddedNodes(ClusterChangedEvent event) { + + // TODO: do this in parallel (and wait) + for (final DiscoveryNode node : event.nodesDelta().addedNodes()) { + try (Releasable ignored = nodeLocks.acquire(node)) { + Integer current = nodes.put(node, 0); + assert current == null : "node " + node + " was added in event but already in internal nodes"; + validateNodeConnected(node); + } + } + } + + public void disconnectFromRemovedNodes(ClusterChangedEvent event) { + for (final DiscoveryNode node : event.nodesDelta().removedNodes()) { + try (Releasable ignored = nodeLocks.acquire(node)) { + Integer current = nodes.remove(node); + assert current != null : "node " + node + " was removed in event but not in internal nodes"; + try { + transportService.disconnectFromNode(node); + } catch (Throwable e) { + logger.warn("failed to disconnect from node [" + node + "]", e); + } + } + } + } + + void validateNodeConnected(DiscoveryNode node) { + assert nodeLocks.isHeldByCurrentThread(node) : "validateNodeConnected must be called under lock"; + if (lifecycle.stoppedOrClosed() || + nodes.containsKey(node) == false) { // we double check existence of node since connectToNode might take time...
+ // nothing to do + } else { + try { + // connecting to an already connected node is a noop + transportService.connectToNode(node); + nodes.put(node, 0); + } catch (Exception e) { + Integer nodeFailureCount = nodes.get(node); + assert nodeFailureCount != null : node + " didn't have a counter in nodes map"; + nodeFailureCount = nodeFailureCount + 1; + // log every 6th failure + if ((nodeFailureCount % 6) == 1) { + logger.warn("failed to connect to node {} (tried [{}] times)", e, node, nodeFailureCount); + } + nodes.put(node, nodeFailureCount); + } + } + } + + class ConnectionChecker extends AbstractRunnable { + + @Override + public void onFailure(Throwable t) { + logger.warn("unexpected error while checking for node reconnects", t); + } + + protected void doRun() { + for (DiscoveryNode node : nodes.keySet()) { + try (Releasable ignored = nodeLocks.acquire(node)) { + validateNodeConnected(node); + } + } + } + + @Override + public void onAfter() { + if (lifecycle.started()) { + backgroundFuture = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, this); + } + } + } + + @Override + protected void doStart() { + backgroundFuture = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, new ConnectionChecker()); + } + + @Override + protected void doStop() { + FutureUtils.cancel(backgroundFuture); + } + + @Override + protected void doClose() { + + } +} diff --git a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 78eef316332..c90edee0d50 100644 --- a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -69,15 +69,17 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus private final State state; private final SnapshotId snapshotId; private final boolean includeGlobalState; + private final boolean partial; private final ImmutableOpenMap shards; private final List indices; private final ImmutableOpenMap> waitingIndices; private final long startTime; - public Entry(SnapshotId snapshotId, boolean includeGlobalState, State state, List indices, long startTime, ImmutableOpenMap shards) { + public Entry(SnapshotId snapshotId, boolean includeGlobalState, boolean partial, State state, List indices, long startTime, ImmutableOpenMap shards) { this.state = state; this.snapshotId = snapshotId; this.includeGlobalState = includeGlobalState; + this.partial = partial; this.indices = indices; this.startTime = startTime; if (shards == null) { @@ -90,7 +92,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus } public Entry(Entry entry, State state, ImmutableOpenMap shards) { - this(entry.snapshotId, entry.includeGlobalState, state, entry.indices, entry.startTime, shards); + this(entry.snapshotId, entry.includeGlobalState, entry.partial, state, entry.indices, entry.startTime, shards); } public Entry(Entry entry, ImmutableOpenMap shards) { @@ -121,6 +123,10 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus return includeGlobalState; } + public boolean partial() { + return partial; + } + public long startTime() { return startTime; } @@ -133,6 +139,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus Entry entry = (Entry) o; if (includeGlobalState != entry.includeGlobalState) return false; + if (partial != entry.partial) return false; if (startTime != entry.startTime) return false; if 
(!indices.equals(entry.indices)) return false; if (!shards.equals(entry.shards)) return false; @@ -148,6 +155,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus int result = state.hashCode(); result = 31 * result + snapshotId.hashCode(); result = 31 * result + (includeGlobalState ? 1 : 0); + result = 31 * result + (partial ? 1 : 0); result = 31 * result + shards.hashCode(); result = 31 * result + indices.hashCode(); result = 31 * result + waitingIndices.hashCode(); @@ -360,6 +368,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus for (int i = 0; i < entries.length; i++) { SnapshotId snapshotId = SnapshotId.readSnapshotId(in); boolean includeGlobalState = in.readBoolean(); + boolean partial = in.readBoolean(); State state = State.fromValue(in.readByte()); int indices = in.readVInt(); List indexBuilder = new ArrayList<>(); @@ -375,7 +384,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus State shardState = State.fromValue(in.readByte()); builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState)); } - entries[i] = new Entry(snapshotId, includeGlobalState, state, Collections.unmodifiableList(indexBuilder), startTime, builder.build()); + entries[i] = new Entry(snapshotId, includeGlobalState, partial, state, Collections.unmodifiableList(indexBuilder), startTime, builder.build()); } return new SnapshotsInProgress(entries); } @@ -386,6 +395,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus for (Entry entry : entries) { entry.snapshotId().writeTo(out); out.writeBoolean(entry.includeGlobalState()); + out.writeBoolean(entry.partial()); out.writeByte(entry.state().value()); out.writeVInt(entry.indices().size()); for (String index : entry.indices()) { @@ -406,6 +416,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus static final XContentBuilderString SNAPSHOTS = new XContentBuilderString("snapshots"); static final XContentBuilderString SNAPSHOT = new XContentBuilderString("snapshot"); static final XContentBuilderString INCLUDE_GLOBAL_STATE = new XContentBuilderString("include_global_state"); + static final XContentBuilderString PARTIAL = new XContentBuilderString("partial"); static final XContentBuilderString STATE = new XContentBuilderString("state"); static final XContentBuilderString INDICES = new XContentBuilderString("indices"); static final XContentBuilderString START_TIME_MILLIS = new XContentBuilderString("start_time_millis"); @@ -431,6 +442,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus builder.field(Fields.REPOSITORY, entry.snapshotId().getRepository()); builder.field(Fields.SNAPSHOT, entry.snapshotId().getSnapshot()); builder.field(Fields.INCLUDE_GLOBAL_STATE, entry.includeGlobalState()); + builder.field(Fields.PARTIAL, entry.partial()); builder.field(Fields.STATE, entry.state()); builder.startArray(Fields.INDICES); { diff --git a/core/src/main/java/org/elasticsearch/cluster/ack/IndicesClusterStateUpdateRequest.java b/core/src/main/java/org/elasticsearch/cluster/ack/IndicesClusterStateUpdateRequest.java index c691abe5906..33d716cb965 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ack/IndicesClusterStateUpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/cluster/ack/IndicesClusterStateUpdateRequest.java @@ -18,17 +18,19 @@ */ package org.elasticsearch.cluster.ack; +import org.elasticsearch.index.Index; + /** * Base cluster state update request that allows to execute update against multiple indices */ 
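Since IndicesClusterStateUpdateRequest now carries Index instances instead of raw names, callers get both the name and the uuid of each resolved index. A hedged sketch of the new accessors; the index name and uuid literals are made up, and Index(name, uuid) is the constructor used elsewhere in this change:

    // hypothetical request instance of some concrete subclass
    void sketch(IndicesClusterStateUpdateRequest<?> request) {
        request.indices(new Index[] { new Index("logs-2016-03", "made-up-uuid") });
        for (Index index : request.indices()) {
            System.out.println(index.getName());
        }
    }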
public abstract class IndicesClusterStateUpdateRequest> extends ClusterStateUpdateRequest { - private String[] indices; + private Index[] indices; /** * Returns the indices the operation needs to be executed on */ - public String[] indices() { + public Index[] indices() { return indices; } @@ -36,7 +38,7 @@ public abstract class IndicesClusterStateUpdateRequest INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.mapping.dynamic_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING = + Setting.positiveTimeSetting("indices.mapping.dynamic_timeout", TimeValue.timeValueSeconds(30), + Property.Dynamic, Property.NodeScope); private IndicesAdminClient client; private volatile TimeValue dynamicMappingUpdateTimeout; @@ -62,48 +64,20 @@ public class MappingUpdatedAction extends AbstractComponent { this.client = client.admin().indices(); } - private PutMappingRequestBuilder updateMappingRequest(String index, String type, Mapping mappingUpdate, final TimeValue timeout) { + private PutMappingRequestBuilder updateMappingRequest(Index index, String type, Mapping mappingUpdate, final TimeValue timeout) { if (type.equals(MapperService.DEFAULT_MAPPING)) { throw new IllegalArgumentException("_default_ mapping should not be updated"); } - return client.preparePutMapping(index).setType(type).setSource(mappingUpdate.toString()) + return client.preparePutMapping().setConcreteIndex(index).setType(type).setSource(mappingUpdate.toString()) .setMasterNodeTimeout(timeout).setTimeout(timeout); } - public void updateMappingOnMaster(String index, String type, Mapping mappingUpdate, final TimeValue timeout, final MappingUpdateListener listener) { - final PutMappingRequestBuilder request = updateMappingRequest(index, type, mappingUpdate, timeout); - if (listener == null) { - request.execute(); - } else { - final ActionListener actionListener = new ActionListener() { - @Override - public void onResponse(PutMappingResponse response) { - if (response.isAcknowledged()) { - listener.onMappingUpdate(); - } else { - listener.onFailure(new TimeoutException("Failed to acknowledge the mapping response within [" + timeout + "]")); - } - } - - @Override - public void onFailure(Throwable e) { - listener.onFailure(e); - } - }; - request.execute(actionListener); - } - } - - public void updateMappingOnMasterAsynchronously(String index, String type, Mapping mappingUpdate) throws Exception { - updateMappingOnMaster(index, type, mappingUpdate, dynamicMappingUpdateTimeout, null); - } - /** - * Same as {@link #updateMappingOnMasterSynchronously(String, String, Mapping, TimeValue)} + * Same as {@link #updateMappingOnMaster(Index, String, Mapping, TimeValue)} * using the default timeout. */ - public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate) throws Exception { - updateMappingOnMasterSynchronously(index, type, mappingUpdate, dynamicMappingUpdateTimeout); + public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate) throws Exception { + updateMappingOnMaster(index, type, mappingUpdate, dynamicMappingUpdateTimeout); } /** @@ -111,19 +85,9 @@ public class MappingUpdatedAction extends AbstractComponent { * {@code timeout}. When this method returns successfully mappings have * been applied to the master node and propagated to data nodes. 
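With the listener-based variants removed in this hunk, a dynamic mapping update now goes through a single blocking entry point. A sketch of a call site, assuming an injected MappingUpdatedAction and a prepared Mapping update (the type name is a placeholder):

    void applyDynamicMappingUpdate(MappingUpdatedAction mappingUpdatedAction,
                                   Index index, Mapping mappingUpdate) throws Exception {
        // blocks until the master acknowledges, using the configured dynamic timeout
        mappingUpdatedAction.updateMappingOnMaster(index, "my_type", mappingUpdate);

        // or with an explicit timeout; a TimeoutException means no acknowledgement in time
        mappingUpdatedAction.updateMappingOnMaster(index, "my_type", mappingUpdate,
            TimeValue.timeValueSeconds(30));
    }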
*/ - public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate, TimeValue timeout) throws Exception { + public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate, TimeValue timeout) throws Exception { if (updateMappingRequest(index, type, mappingUpdate, timeout).get().isAcknowledged() == false) { throw new TimeoutException("Failed to acknowledge mapping update within [" + timeout + "]"); } } - - /** - * A listener to be notified when the mappings were updated - */ - public static interface MappingUpdateListener { - - void onMappingUpdate(); - - void onFailure(Throwable t); - } } diff --git a/core/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java b/core/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java index 012cc66e110..93fce95fc23 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java @@ -76,18 +76,18 @@ public class NodeIndexDeletedAction extends AbstractComponent { listeners.remove(listener); } - public void nodeIndexDeleted(final ClusterState clusterState, final String index, final IndexSettings indexSettings, final String nodeId) { + public void nodeIndexDeleted(final ClusterState clusterState, final Index index, final IndexSettings indexSettings, final String nodeId) { final DiscoveryNodes nodes = clusterState.nodes(); transportService.sendRequest(clusterState.nodes().masterNode(), INDEX_DELETED_ACTION_NAME, new NodeIndexDeletedMessage(index, nodeId), EmptyTransportResponseHandler.INSTANCE_SAME); if (nodes.localNode().isDataNode() == false) { - logger.trace("[{}] not acking store deletion (not a data node)"); + logger.trace("[{}] not acking store deletion (not a data node)", index); return; } threadPool.generic().execute(new AbstractRunnable() { @Override public void onFailure(Throwable t) { - logger.warn("[{}]failed to ack index store deleted for index", t, index); + logger.warn("[{}] failed to ack index store deleted for index", t, index); } @Override @@ -97,7 +97,7 @@ public class NodeIndexDeletedAction extends AbstractComponent { }); } - private void lockIndexAndAck(String index, DiscoveryNodes nodes, String nodeId, ClusterState clusterState, IndexSettings indexSettings) throws IOException { + private void lockIndexAndAck(Index index, DiscoveryNodes nodes, String nodeId, ClusterState clusterState, IndexSettings indexSettings) throws IOException { try { // we are waiting until we can lock the index / all shards on the node and then we ack the delete of the store to the // master. 
If we can't acquire the locks here immediately there might be a shard of this index still holding on to the lock @@ -114,9 +114,9 @@ public class NodeIndexDeletedAction extends AbstractComponent { } public interface Listener { - void onNodeIndexDeleted(String index, String nodeId); + void onNodeIndexDeleted(Index index, String nodeId); - void onNodeIndexStoreDeleted(String index, String nodeId); + void onNodeIndexStoreDeleted(Index index, String nodeId); } private class NodeIndexDeletedTransportHandler implements TransportRequestHandler { @@ -143,13 +143,13 @@ public class NodeIndexDeletedAction extends AbstractComponent { public static class NodeIndexDeletedMessage extends TransportRequest { - String index; + Index index; String nodeId; public NodeIndexDeletedMessage() { } - NodeIndexDeletedMessage(String index, String nodeId) { + NodeIndexDeletedMessage(Index index, String nodeId) { this.index = index; this.nodeId = nodeId; } @@ -157,27 +157,27 @@ public class NodeIndexDeletedAction extends AbstractComponent { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeString(index); + index.writeTo(out); out.writeString(nodeId); } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - index = in.readString(); + index = new Index(in); nodeId = in.readString(); } } public static class NodeIndexStoreDeletedMessage extends TransportRequest { - String index; + Index index; String nodeId; public NodeIndexStoreDeletedMessage() { } - NodeIndexStoreDeletedMessage(String index, String nodeId) { + NodeIndexStoreDeletedMessage(Index index, String nodeId) { this.index = index; this.nodeId = nodeId; } @@ -185,14 +185,14 @@ public class NodeIndexDeletedAction extends AbstractComponent { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeString(index); + index.writeTo(out); out.writeString(nodeId); } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - index = in.readString(); + index = new Index(in); nodeId = in.readString(); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index f0cd8cd9ca9..5d7dd0b830c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -21,7 +21,6 @@ package org.elasticsearch.cluster.action.shard; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.ClusterStateTaskConfig; @@ -37,6 +36,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractComponent; @@ -151,7 +151,7 @@ public class ShardStateAction extends AbstractComponent { @Override public void onNewClusterState(ClusterState state) { if (logger.isTraceEnabled()) { - logger.trace("new 
cluster state [{}] after waiting for master election to fail shard [{}]", shardRoutingEntry.getShardRouting().shardId(), state.prettyPrint(), shardRoutingEntry); + logger.trace("new cluster state [{}] after waiting for master election to fail shard [{}]", state.prettyPrint(), shardRoutingEntry); } sendShardAction(actionName, observer, shardRoutingEntry, listener); } @@ -321,7 +321,7 @@ public class ShardStateAction extends AbstractComponent { if (numberOfUnassignedShards > 0) { String reason = String.format(Locale.ROOT, "[%d] unassigned shards after failing shards", numberOfUnassignedShards); if (logger.isTraceEnabled()) { - logger.trace(reason + ", scheduling a reroute"); + logger.trace("{}, scheduling a reroute", reason); } routingService.reroute(reason); } diff --git a/core/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java b/core/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java index d66a2437ef2..42ab496fe33 100644 --- a/core/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java +++ b/core/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java @@ -79,7 +79,7 @@ public final class ClusterStateHealth implements Iterable, S * @param clusterState The current cluster state. Must not be null. */ public ClusterStateHealth(ClusterState clusterState) { - this(clusterState, clusterState.metaData().concreteAllIndices()); + this(clusterState, clusterState.metaData().getConcreteAllIndices()); } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java b/core/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java index d9b288bb897..4b4a8e54d7c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; /** * This class acts as a functional wrapper around the index.auto_expand_replicas setting. 
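The AutoExpandReplicas hunk that follows is one of many in this change that swap the old boolean-dynamic flag plus Setting.Scope arguments for Property flags. A sketch of declaring and reading a setting with the new API; the declaration mirrors INDEX_PRIORITY_SETTING from the IndexMetaData hunks below, while the settings values are made up ("0-all" being the documented min-max form for auto-expansion):

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Setting.Property;
    import org.elasticsearch.common.settings.Settings;

    class PropertySettingSketch {
        public static void main(String[] args) {
            Setting<Integer> priority =
                Setting.intSetting("index.priority", 1, 0, Property.Dynamic, Property.IndexScope);
            Settings settings = Settings.builder()
                .put("index.priority", 5)
                .put("index.auto_expand_replicas", "0-all")
                .build();
            System.out.println(priority.get(settings)); // prints 5
        }
    }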
@@ -56,7 +57,7 @@ final class AutoExpandReplicas { } } return new AutoExpandReplicas(min, max, true); - }, true, Setting.Scope.INDEX); + }, Property.Dynamic, Property.IndexScope); private final int minReplicas; private final int maxReplicas; diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 7b4d5a68cec..ca3c153e1d6 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.metadata; +import com.carrotsearch.hppc.LongArrayList; import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; @@ -29,6 +30,8 @@ import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.node.DiscoveryNodeFilters; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.collect.ImmutableOpenIntMap; @@ -38,6 +41,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.loader.SettingsLoader; import org.elasticsearch.common.xcontent.FromXContentBuilder; @@ -45,6 +49,8 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.gateway.MetaDataStateFormat; import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.RestStatus; @@ -53,6 +59,7 @@ import org.joda.time.DateTimeZone; import java.io.IOException; import java.text.ParseException; +import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; @@ -152,28 +159,36 @@ public class IndexMetaData implements Diffable, FromXContentBuild public static final String INDEX_SETTING_PREFIX = "index."; public static final String SETTING_NUMBER_OF_SHARDS = "index.number_of_shards"; - public static final Setting INDEX_NUMBER_OF_SHARDS_SETTING = Setting.intSetting(SETTING_NUMBER_OF_SHARDS, 5, 1, false, Setting.Scope.INDEX); + public static final Setting INDEX_NUMBER_OF_SHARDS_SETTING = + Setting.intSetting(SETTING_NUMBER_OF_SHARDS, 5, 1, Property.IndexScope); public static final String SETTING_NUMBER_OF_REPLICAS = "index.number_of_replicas"; - public static final Setting INDEX_NUMBER_OF_REPLICAS_SETTING = Setting.intSetting(SETTING_NUMBER_OF_REPLICAS, 1, 0, true, Setting.Scope.INDEX); + public static final Setting INDEX_NUMBER_OF_REPLICAS_SETTING = + Setting.intSetting(SETTING_NUMBER_OF_REPLICAS, 1, 0, Property.Dynamic, Property.IndexScope); public static final String SETTING_SHADOW_REPLICAS = "index.shadow_replicas"; - 
public static final Setting INDEX_SHADOW_REPLICAS_SETTING = Setting.boolSetting(SETTING_SHADOW_REPLICAS, false, false, Setting.Scope.INDEX); + public static final Setting INDEX_SHADOW_REPLICAS_SETTING = + Setting.boolSetting(SETTING_SHADOW_REPLICAS, false, Property.IndexScope); public static final String SETTING_SHARED_FILESYSTEM = "index.shared_filesystem"; - public static final Setting INDEX_SHARED_FILESYSTEM_SETTING = Setting.boolSetting(SETTING_SHARED_FILESYSTEM, false, false, Setting.Scope.INDEX); + public static final Setting INDEX_SHARED_FILESYSTEM_SETTING = + Setting.boolSetting(SETTING_SHARED_FILESYSTEM, false, Property.IndexScope); public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas"; public static final Setting INDEX_AUTO_EXPAND_REPLICAS_SETTING = AutoExpandReplicas.SETTING; public static final String SETTING_READ_ONLY = "index.blocks.read_only"; - public static final Setting INDEX_READ_ONLY_SETTING = Setting.boolSetting(SETTING_READ_ONLY, false, true, Setting.Scope.INDEX); + public static final Setting INDEX_READ_ONLY_SETTING = + Setting.boolSetting(SETTING_READ_ONLY, false, Property.Dynamic, Property.IndexScope); public static final String SETTING_BLOCKS_READ = "index.blocks.read"; - public static final Setting INDEX_BLOCKS_READ_SETTING = Setting.boolSetting(SETTING_BLOCKS_READ, false, true, Setting.Scope.INDEX); + public static final Setting INDEX_BLOCKS_READ_SETTING = + Setting.boolSetting(SETTING_BLOCKS_READ, false, Property.Dynamic, Property.IndexScope); public static final String SETTING_BLOCKS_WRITE = "index.blocks.write"; - public static final Setting INDEX_BLOCKS_WRITE_SETTING = Setting.boolSetting(SETTING_BLOCKS_WRITE, false, true, Setting.Scope.INDEX); + public static final Setting INDEX_BLOCKS_WRITE_SETTING = + Setting.boolSetting(SETTING_BLOCKS_WRITE, false, Property.Dynamic, Property.IndexScope); public static final String SETTING_BLOCKS_METADATA = "index.blocks.metadata"; - public static final Setting INDEX_BLOCKS_METADATA_SETTING = Setting.boolSetting(SETTING_BLOCKS_METADATA, false, true, Setting.Scope.INDEX); + public static final Setting INDEX_BLOCKS_METADATA_SETTING = + Setting.boolSetting(SETTING_BLOCKS_METADATA, false, Property.Dynamic, Property.IndexScope); public static final String SETTING_VERSION_CREATED = "index.version.created"; public static final String SETTING_VERSION_CREATED_STRING = "index.version.created_string"; @@ -182,30 +197,45 @@ public class IndexMetaData implements Diffable, FromXContentBuild public static final String SETTING_VERSION_MINIMUM_COMPATIBLE = "index.version.minimum_compatible"; public static final String SETTING_CREATION_DATE = "index.creation_date"; public static final String SETTING_PRIORITY = "index.priority"; - public static final Setting INDEX_PRIORITY_SETTING = Setting.intSetting("index.priority", 1, 0, true, Setting.Scope.INDEX); + public static final Setting INDEX_PRIORITY_SETTING = + Setting.intSetting("index.priority", 1, 0, Property.Dynamic, Property.IndexScope); public static final String SETTING_CREATION_DATE_STRING = "index.creation_date_string"; public static final String SETTING_INDEX_UUID = "index.uuid"; public static final String SETTING_DATA_PATH = "index.data_path"; - public static final Setting INDEX_DATA_PATH_SETTING = new Setting<>(SETTING_DATA_PATH, "", Function.identity(), false, Setting.Scope.INDEX); + public static final Setting INDEX_DATA_PATH_SETTING = + new Setting<>(SETTING_DATA_PATH, "", Function.identity(), Property.IndexScope); public static final String 
SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE = "index.shared_filesystem.recover_on_any_node"; - public static final Setting INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING = Setting.boolSetting(SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, true, Setting.Scope.INDEX); + public static final Setting INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING = + Setting.boolSetting(SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, Property.Dynamic, Property.IndexScope); public static final String INDEX_UUID_NA_VALUE = "_na_"; - public static final Setting INDEX_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.require.", true, Setting.Scope.INDEX); - public static final Setting INDEX_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.include.", true, Setting.Scope.INDEX); - public static final Setting INDEX_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.exclude.", true, Setting.Scope.INDEX); + public static final Setting INDEX_ROUTING_REQUIRE_GROUP_SETTING = + Setting.groupSetting("index.routing.allocation.require.", Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_ROUTING_INCLUDE_GROUP_SETTING = + Setting.groupSetting("index.routing.allocation.include.", Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_ROUTING_EXCLUDE_GROUP_SETTING = + Setting.groupSetting("index.routing.allocation.exclude.", Property.Dynamic, Property.IndexScope); public static final IndexMetaData PROTO = IndexMetaData.builder("") .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) .numberOfShards(1).numberOfReplicas(0).build(); public static final String KEY_ACTIVE_ALLOCATIONS = "active_allocations"; + static final String KEY_VERSION = "version"; + static final String KEY_SETTINGS = "settings"; + static final String KEY_STATE = "state"; + static final String KEY_MAPPINGS = "mappings"; + static final String KEY_ALIASES = "aliases"; + public static final String KEY_PRIMARY_TERMS = "primary_terms"; + + public static final String INDEX_STATE_FILE_PREFIX = "state-"; private final int numberOfShards; private final int numberOfReplicas; private final Index index; private final long version; + private final long[] primaryTerms; private final State state; @@ -229,7 +259,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild private final Version indexUpgradedVersion; private final org.apache.lucene.util.Version minimumCompatibleLuceneVersion; - private IndexMetaData(Index index, long version, State state, int numberOfShards, int numberOfReplicas, Settings settings, + private IndexMetaData(Index index, long version, long[] primaryTerms, State state, int numberOfShards, int numberOfReplicas, Settings settings, ImmutableOpenMap mappings, ImmutableOpenMap aliases, ImmutableOpenMap customs, ImmutableOpenIntMap> activeAllocationIds, DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters, @@ -237,6 +267,8 @@ public class IndexMetaData implements Diffable, FromXContentBuild this.index = index; this.version = version; + this.primaryTerms = primaryTerms; + assert primaryTerms.length == numberOfShards; this.state = state; this.numberOfShards = numberOfShards; this.numberOfReplicas = numberOfReplicas; @@ -278,6 +310,16 @@ public class IndexMetaData implements Diffable, FromXContentBuild return this.version; } + + /** + * The term of the current selected primary. 
This is a non-negative number incremented when + * a primary shard is assigned after a full cluster restart or a replica shard is promoted to a primary + * See {@link AllocationService#updateMetaDataWithRoutingTable(MetaData, RoutingTable, RoutingTable)}. + **/ + public long primaryTerm(int shardId) { + return this.primaryTerms[shardId]; + } + /** * Return the {@link Version} on which this index has been created. This * information is typically useful for backward compatibility. @@ -398,6 +440,10 @@ public class IndexMetaData implements Diffable, FromXContentBuild IndexMetaData that = (IndexMetaData) o; + if (version != that.version) { + return false; + } + if (!aliases.equals(that.aliases)) { return false; } @@ -416,6 +462,10 @@ public class IndexMetaData implements Diffable, FromXContentBuild if (!customs.equals(that.customs)) { return false; } + + if (Arrays.equals(primaryTerms, that.primaryTerms) == false) { + return false; + } if (!activeAllocationIds.equals(that.activeAllocationIds)) { return false; } @@ -425,14 +475,18 @@ public class IndexMetaData implements Diffable, FromXContentBuild @Override public int hashCode() { int result = index.hashCode(); + result = 31 * result + Long.hashCode(version); result = 31 * result + state.hashCode(); result = 31 * result + aliases.hashCode(); result = 31 * result + settings.hashCode(); result = 31 * result + mappings.hashCode(); + result = 31 * result + customs.hashCode(); + result = 31 * result + Arrays.hashCode(primaryTerms); result = 31 * result + activeAllocationIds.hashCode(); return result; } + @Override public Diff diff(IndexMetaData previousState) { return new IndexMetaDataDiff(previousState, this); @@ -458,6 +512,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild private final String index; private final long version; + private final long[] primaryTerms; private final State state; private final Settings settings; private final Diff> mappings; @@ -470,11 +525,12 @@ public class IndexMetaData implements Diffable, FromXContentBuild version = after.version; state = after.state; settings = after.settings; + primaryTerms = after.primaryTerms; mappings = DiffableUtils.diff(before.mappings, after.mappings, DiffableUtils.getStringKeySerializer()); aliases = DiffableUtils.diff(before.aliases, after.aliases, DiffableUtils.getStringKeySerializer()); customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer()); activeAllocationIds = DiffableUtils.diff(before.activeAllocationIds, after.activeAllocationIds, - DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance()); + DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance()); } public IndexMetaDataDiff(StreamInput in) throws IOException { @@ -482,22 +538,23 @@ public class IndexMetaData implements Diffable, FromXContentBuild version = in.readLong(); state = State.fromId(in.readByte()); settings = Settings.readSettingsFromStream(in); + primaryTerms = in.readVLongArray(); mappings = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), MappingMetaData.PROTO); aliases = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), AliasMetaData.PROTO); customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), - new DiffableUtils.DiffableValueSerializer() { - @Override - public Custom read(StreamInput in, String key) throws IOException { - return lookupPrototypeSafe(key).readFrom(in); - } + new 
DiffableUtils.DiffableValueSerializer() { + @Override + public Custom read(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readFrom(in); + } - @Override - public Diff readDiff(StreamInput in, String key) throws IOException { - return lookupPrototypeSafe(key).readDiffFrom(in); - } - }); + @Override + public Diff readDiff(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readDiffFrom(in); + } + }); activeAllocationIds = DiffableUtils.readImmutableOpenIntMapDiff(in, DiffableUtils.getVIntKeySerializer(), - DiffableUtils.StringSetValueSerializer.getInstance()); + DiffableUtils.StringSetValueSerializer.getInstance()); } @Override @@ -506,6 +563,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild out.writeLong(version); out.writeByte(state.id); Settings.writeSettingsToStream(settings, out); + out.writeVLongArray(primaryTerms); mappings.writeTo(out); aliases.writeTo(out); customs.writeTo(out); @@ -518,6 +576,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild builder.version(version); builder.state(state); builder.settings(settings); + builder.primaryTerms(primaryTerms); builder.mappings.putAll(mappings.apply(part.mappings)); builder.aliases.putAll(aliases.apply(part.aliases)); builder.customs.putAll(customs.apply(part.customs)); @@ -532,6 +591,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild builder.version(in.readLong()); builder.state(State.fromId(in.readByte())); builder.settings(readSettingsFromStream(in)); + builder.primaryTerms(in.readVLongArray()); int mappingsSize = in.readVInt(); for (int i = 0; i < mappingsSize; i++) { MappingMetaData mappingMd = MappingMetaData.PROTO.readFrom(in); @@ -563,6 +623,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild out.writeLong(version); out.writeByte(state.id()); writeSettingsToStream(settings, out); + out.writeVLongArray(primaryTerms); out.writeVInt(mappings.size()); for (ObjectCursor cursor : mappings.values()) { cursor.value.writeTo(out); @@ -596,6 +657,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild private String index; private State state = State.OPEN; private long version = 1; + private long[] primaryTerms = null; private Settings settings = Settings.Builder.EMPTY_SETTINGS; private final ImmutableOpenMap.Builder mappings; private final ImmutableOpenMap.Builder aliases; @@ -615,6 +677,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild this.state = indexMetaData.state; this.version = indexMetaData.version; this.settings = indexMetaData.getSettings(); + this.primaryTerms = indexMetaData.primaryTerms.clone(); this.mappings = ImmutableOpenMap.builder(indexMetaData.mappings); this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases); this.customs = ImmutableOpenMap.builder(indexMetaData.customs); @@ -654,8 +717,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild } public Builder settings(Settings.Builder settings) { - this.settings = settings.build(); - return this; + return settings(settings.build()); } public Builder settings(Settings settings) { @@ -723,6 +785,42 @@ public class IndexMetaData implements Diffable, FromXContentBuild return this; } + /** + * returns the primary term for the given shard. + * See {@link IndexMetaData#primaryTerm(int)} for more information. 
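The new primaryTerm accessors on the Builder make per-shard terms part of IndexMetaData round-trips. A sketch of how a term bump for a promoted primary might look, assuming the Builder copy constructor shown earlier in this file is reachable through IndexMetaData.builder(IndexMetaData):

    // increment shard 0's primary term, e.g. after a replica promotion
    IndexMetaData bumped = IndexMetaData.builder(indexMetaData)
        .primaryTerm(0, indexMetaData.primaryTerm(0) + 1)
        .build();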
+ */ + public long primaryTerm(int shardId) { + if (primaryTerms == null) { + initializePrimaryTerms(); + } + return this.primaryTerms[shardId]; + } + + /** + * sets the primary term for the given shard. + * See {@link IndexMetaData#primaryTerm(int)} for more information. + */ + public Builder primaryTerm(int shardId, long primaryTerm) { + if (primaryTerms == null) { + initializePrimaryTerms(); + } + this.primaryTerms[shardId] = primaryTerm; + return this; + } + + private void primaryTerms(long[] primaryTerms) { + this.primaryTerms = primaryTerms.clone(); + } + + private void initializePrimaryTerms() { + assert primaryTerms == null; + if (numberOfShards() < 0) { + throw new IllegalStateException("you must set the number of shards before setting/reading primary terms"); + } + primaryTerms = new long[numberOfShards()]; + } + + public IndexMetaData build() { ImmutableOpenMap.Builder tmpAliases = aliases; Settings tmpSettings = settings; @@ -797,27 +895,34 @@ public class IndexMetaData implements Diffable, FromXContentBuild minimumCompatibleLuceneVersion = null; } + if (primaryTerms == null) { + initializePrimaryTerms(); + } else if (primaryTerms.length != numberOfShards) { + throw new IllegalStateException("primaryTerms length is [" + primaryTerms.length + + "] but should be equal to number of shards [" + numberOfShards() + "]"); + } + final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE); - return new IndexMetaData(new Index(index, uuid), version, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(), - tmpAliases.build(), customs.build(), filledActiveAllocationIds.build(), requireFilters, includeFilters, excludeFilters, - indexCreatedVersion, indexUpgradedVersion, minimumCompatibleLuceneVersion); + return new IndexMetaData(new Index(index, uuid), version, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(), + tmpAliases.build(), customs.build(), filledActiveAllocationIds.build(), requireFilters, includeFilters, excludeFilters, + indexCreatedVersion, indexUpgradedVersion, minimumCompatibleLuceneVersion); } public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(indexMetaData.getIndex().getName(), XContentBuilder.FieldCaseConversion.NONE); - builder.field("version", indexMetaData.getVersion()); - builder.field("state", indexMetaData.getState().toString().toLowerCase(Locale.ENGLISH)); + builder.field(KEY_VERSION, indexMetaData.getVersion()); + builder.field(KEY_STATE, indexMetaData.getState().toString().toLowerCase(Locale.ENGLISH)); boolean binary = params.paramAsBoolean("binary", false); - builder.startObject("settings"); + builder.startObject(KEY_SETTINGS); for (Map.Entry entry : indexMetaData.getSettings().getAsMap().entrySet()) { builder.field(entry.getKey(), entry.getValue()); } builder.endObject(); - builder.startArray("mappings"); + builder.startArray(KEY_MAPPINGS); for (ObjectObjectCursor cursor : indexMetaData.getMappings()) { if (binary) { builder.value(cursor.value.source().compressed()); @@ -837,12 +942,18 @@ public class IndexMetaData implements Diffable, FromXContentBuild builder.endObject(); } - builder.startObject("aliases"); + builder.startObject(KEY_ALIASES); for (ObjectCursor cursor : indexMetaData.getAliases().values()) { AliasMetaData.Builder.toXContent(cursor.value, builder, params); } builder.endObject(); + builder.startArray(KEY_PRIMARY_TERMS); + for (int i = 0; i < indexMetaData.getNumberOfShards(); i++) { 
+ builder.value(indexMetaData.primaryTerm(i)); + } + builder.endArray(); + builder.startObject(KEY_ACTIVE_ALLOCATIONS); for (IntObjectCursor> cursor : indexMetaData.activeAllocationIds) { builder.startArray(String.valueOf(cursor.key)); @@ -877,9 +988,9 @@ public class IndexMetaData implements Diffable, FromXContentBuild if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT) { - if ("settings".equals(currentFieldName)) { + if (KEY_SETTINGS.equals(currentFieldName)) { builder.settings(Settings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered()))); - } else if ("mappings".equals(currentFieldName)) { + } else if (KEY_MAPPINGS.equals(currentFieldName)) { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); @@ -891,7 +1002,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild throw new IllegalArgumentException("Unexpected token: " + token); } } - } else if ("aliases".equals(currentFieldName)) { + } else if (KEY_ALIASES.equals(currentFieldName)) { while (parser.nextToken() != XContentParser.Token.END_OBJECT) { builder.putAlias(AliasMetaData.Builder.fromXContent(parser)); } @@ -931,7 +1042,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild } } } else if (token == XContentParser.Token.START_ARRAY) { - if ("mappings".equals(currentFieldName)) { + if (KEY_MAPPINGS.equals(currentFieldName)) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { builder.putMapping(new MappingMetaData(new CompressedXContent(parser.binaryValue()))); @@ -943,13 +1054,23 @@ public class IndexMetaData implements Diffable, FromXContentBuild } } } + } else if (KEY_PRIMARY_TERMS.equals(currentFieldName)) { + LongArrayList list = new LongArrayList(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token == XContentParser.Token.VALUE_NUMBER) { + list.add(parser.longValue()); + } else { + throw new IllegalStateException("found a non-numeric value under [" + KEY_PRIMARY_TERMS + "]"); + } + } + builder.primaryTerms(list.toArray()); } else { throw new IllegalArgumentException("Unexpected field for an array " + currentFieldName); } } else if (token.isValue()) { - if ("state".equals(currentFieldName)) { + if (KEY_STATE.equals(currentFieldName)) { builder.state(State.fromString(parser.text())); - } else if ("version".equals(currentFieldName)) { + } else if (KEY_VERSION.equals(currentFieldName)) { builder.version(parser.longValue()); } else { throw new IllegalArgumentException("Unexpected field [" + currentFieldName + "]"); @@ -1008,4 +1129,21 @@ public class IndexMetaData implements Diffable, FromXContentBuild return builder.build(); } + private static final ToXContent.Params FORMAT_PARAMS = new MapParams(Collections.singletonMap("binary", "true")); + + /** + * State format for {@link IndexMetaData} to write to and load from disk + */ + public static final MetaDataStateFormat FORMAT = new MetaDataStateFormat(XContentType.SMILE, INDEX_STATE_FILE_PREFIX) { + + @Override + public void toXContent(XContentBuilder builder, IndexMetaData state) throws IOException { + Builder.toXContent(state, builder, FORMAT_PARAMS); + } + + @Override + public IndexMetaData fromXContent(XContentParser parser) throws IOException { + return Builder.fromXContent(parser); + } + }; } diff 
--git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index cca633a7651..2abbea04d51 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.indices.IndexClosedException; import org.joda.time.DateTimeZone; @@ -65,11 +66,20 @@ public class IndexNameExpressionResolver extends AbstractComponent { ); } + /** + * Same as {@link #concreteIndexNames(ClusterState, IndicesOptions, String...)}, but the index expressions and options + * are encapsulated in the specified request. + */ + public String[] concreteIndexNames(ClusterState state, IndicesRequest request) { + Context context = new Context(state, request.indicesOptions()); + return concreteIndexNames(context, request.indices()); + } + /** * Same as {@link #concreteIndices(ClusterState, IndicesOptions, String...)}, but the index expressions and options * are encapsulated in the specified request. */ - public String[] concreteIndices(ClusterState state, IndicesRequest request) { + public Index[] concreteIndices(ClusterState state, IndicesRequest request) { Context context = new Context(state, request.indicesOptions()); return concreteIndices(context, request.indices()); } @@ -87,7 +97,25 @@ public class IndexNameExpressionResolver extends AbstractComponent { * @throws IllegalArgumentException if one of the aliases resolve to multiple indices and the provided * indices options in the context don't allow such a case. */ - public String[] concreteIndices(ClusterState state, IndicesOptions options, String... indexExpressions) { + public String[] concreteIndexNames(ClusterState state, IndicesOptions options, String... indexExpressions) { + Context context = new Context(state, options); + return concreteIndexNames(context, indexExpressions); + } + + /** + * Translates the provided index expression into actual concrete indices, properly deduplicated. + * + * @param state the cluster state containing all the data to resolve to expressions to concrete indices + * @param options defines how the aliases or indices need to be resolved to concrete indices + * @param indexExpressions expressions that can be resolved to alias or index names. + * @return the resolved concrete indices based on the cluster state, indices options and index expressions + * @throws IndexNotFoundException if one of the index expressions is pointing to a missing index or alias and the + * provided indices options in the context don't allow such a case, or if the final result of the indices resolution + * contains no indices and the indices options in the context don't allow such a case. + * @throws IllegalArgumentException if one of the aliases resolve to multiple indices and the provided + * indices options in the context don't allow such a case. + */ + public Index[] concreteIndices(ClusterState state, IndicesOptions options, String... 
indexExpressions) { Context context = new Context(state, options); return concreteIndices(context, indexExpressions); } @@ -105,12 +133,21 @@ public class IndexNameExpressionResolver extends AbstractComponent { * @throws IllegalArgumentException if one of the aliases resolve to multiple indices and the provided * indices options in the context don't allow such a case. */ - public String[] concreteIndices(ClusterState state, IndicesOptions options, long startTime, String... indexExpressions) { + public String[] concreteIndexNames(ClusterState state, IndicesOptions options, long startTime, String... indexExpressions) { Context context = new Context(state, options, startTime); - return concreteIndices(context, indexExpressions); + return concreteIndexNames(context, indexExpressions); } - String[] concreteIndices(Context context, String... indexExpressions) { + String[] concreteIndexNames(Context context, String... indexExpressions) { + Index[] indexes = concreteIndices(context, indexExpressions); + String[] names = new String[indexes.length]; + for (int i = 0; i < indexes.length; i++) { + names[i] = indexes[i].getName(); + } + return names; + } + + Index[] concreteIndices(Context context, String... indexExpressions) { if (indexExpressions == null || indexExpressions.length == 0) { indexExpressions = new String[]{MetaData.ALL}; } @@ -136,11 +173,11 @@ public class IndexNameExpressionResolver extends AbstractComponent { infe.setResources("index_expression", indexExpressions); throw infe; } else { - return Strings.EMPTY_ARRAY; + return Index.EMPTY_ARRAY; } } - final Set concreteIndices = new HashSet<>(expressions.size()); + final Set concreteIndices = new HashSet<>(expressions.size()); for (String expression : expressions) { AliasOrIndex aliasOrIndex = metaData.getAliasAndIndexLookup().get(expression); if (aliasOrIndex == null) { @@ -169,11 +206,11 @@ public class IndexNameExpressionResolver extends AbstractComponent { throw new IndexClosedException(index.getIndex()); } else { if (options.forbidClosedIndices() == false) { - concreteIndices.add(index.getIndex().getName()); + concreteIndices.add(index.getIndex()); } } } else if (index.getState() == IndexMetaData.State.OPEN) { - concreteIndices.add(index.getIndex().getName()); + concreteIndices.add(index.getIndex()); } else { throw new IllegalStateException("index state [" + index.getState() + "] not supported"); } @@ -185,7 +222,7 @@ public class IndexNameExpressionResolver extends AbstractComponent { infe.setResources("index_expression", indexExpressions); throw infe; } - return concreteIndices.toArray(new String[concreteIndices.size()]); + return concreteIndices.toArray(new Index[concreteIndices.size()]); } /** @@ -200,9 +237,9 @@ public class IndexNameExpressionResolver extends AbstractComponent { * @throws IllegalArgumentException if the index resolution lead to more than one index * @return the concrete index obtained as a result of the index resolution */ - public String concreteSingleIndex(ClusterState state, IndicesRequest request) { + public Index concreteSingleIndex(ClusterState state, IndicesRequest request) { String indexExpression = request.indices() != null && request.indices().length > 0 ? 
request.indices()[0] : null; - String[] indices = concreteIndices(state, request.indicesOptions(), indexExpression); + Index[] indices = concreteIndices(state, request.indicesOptions(), indexExpression); if (indices.length != 1) { throw new IllegalArgumentException("unable to return a single index as the index and options provided got resolved to multiple indices"); } @@ -395,7 +432,7 @@ public class IndexNameExpressionResolver extends AbstractComponent { if (routing != null) { Set r = Strings.splitStringByCommaToSet(routing); Map> routings = new HashMap<>(); - String[] concreteIndices = metaData.concreteAllIndices(); + String[] concreteIndices = metaData.getConcreteAllIndices(); for (String index : concreteIndices) { routings.put(index, r); } @@ -435,7 +472,7 @@ public class IndexNameExpressionResolver extends AbstractComponent { */ boolean isPatternMatchingAllIndices(MetaData metaData, String[] indicesOrAliases, String[] concreteIndices) { // if we end up matching on all indices, check, if its a wildcard parameter, or a "-something" structure - if (concreteIndices.length == metaData.concreteAllIndices().length && indicesOrAliases.length > 0) { + if (concreteIndices.length == metaData.getConcreteAllIndices().length && indicesOrAliases.length > 0) { //we might have something like /-test1,+test1 that would identify all indices //or something like /-test1 with test1 index missing and IndicesOptions.lenient() @@ -686,16 +723,16 @@ public class IndexNameExpressionResolver extends AbstractComponent { } private boolean isEmptyOrTrivialWildcard(List expressions) { - return expressions.isEmpty() || (expressions.size() == 1 && (MetaData.ALL.equals(expressions.get(0))) || Regex.isMatchAllPattern(expressions.get(0))); + return expressions.isEmpty() || (expressions.size() == 1 && (MetaData.ALL.equals(expressions.get(0)) || Regex.isMatchAllPattern(expressions.get(0)))); } private List resolveEmptyOrTrivialWildcard(IndicesOptions options, MetaData metaData, boolean assertEmpty) { if (options.expandWildcardsOpen() && options.expandWildcardsClosed()) { - return Arrays.asList(metaData.concreteAllIndices()); + return Arrays.asList(metaData.getConcreteAllIndices()); } else if (options.expandWildcardsOpen()) { - return Arrays.asList(metaData.concreteAllOpenIndices()); + return Arrays.asList(metaData.getConcreteAllOpenIndices()); } else if (options.expandWildcardsClosed()) { - return Arrays.asList(metaData.concreteAllClosedIndices()); + return Arrays.asList(metaData.getConcreteAllClosedIndices()); } else { assert assertEmpty : "Shouldn't end up here"; return Collections.emptyList(); @@ -867,7 +904,7 @@ public class IndexNameExpressionResolver extends AbstractComponent { * Returns true iff the given expression resolves to the given index name otherwise false */ public final boolean matchesIndex(String indexName, String expression, ClusterState state) { - final String[] concreteIndices = concreteIndices(state, IndicesOptions.lenientExpandOpen(), expression); + final String[] concreteIndices = concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), expression); for (String index : concreteIndices) { if (Regex.simpleMatch(index, indexName)) { return true; diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java index a88f1609b9e..10b05c46657 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java +++ 
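The resolver refactor above inverts the old relationship: Index objects (name plus UUID) are now the primary result, and the String-returning concreteIndexNames variants are thin wrappers that project each Index to its name. A rough sketch of that delegation, using a stand-in IndexRef type instead of org.elasticsearch.index.Index and stubbing out the actual cluster-state resolution:

```java
// IndexRef stands in for org.elasticsearch.index.Index: a name plus the UUID
// pinning it to one concrete incarnation of the index.
class IndexRef {
    final String name;
    final String uuid;

    IndexRef(String name, String uuid) {
        this.name = name;
        this.uuid = uuid;
    }
}

class ResolverSketch {
    // The real method walks the cluster state and deduplicates into a Set;
    // here the resolution itself is stubbed out.
    IndexRef[] concreteIndices(String... expressions) {
        return new IndexRef[] { new IndexRef("logs-2016.03", "some-uuid") };
    }

    // The name-based API is now just a projection over the Index-based one,
    // the same shape as the new concreteIndexNames(Context, ...) method.
    String[] concreteIndexNames(String... expressions) {
        IndexRef[] indices = concreteIndices(expressions);
        String[] names = new String[indices.length];
        for (int i = 0; i < indices.length; i++) {
            names[i] = indices[i].name;
        }
        return names;
    }
}
```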
b/core/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java @@ -84,20 +84,10 @@ public class MappingMetaData extends AbstractDiffable { private static final FormatDateTimeFormatter EPOCH_MILLIS_PARSER = Joda.forPattern("epoch_millis"); - public static String parseStringTimestamp(String timestampAsString, FormatDateTimeFormatter dateTimeFormatter, - Version version) throws TimestampParsingException { + public static String parseStringTimestamp(String timestampAsString, FormatDateTimeFormatter dateTimeFormatter) throws TimestampParsingException { try { - // no need for unix timestamp parsing in 2.x - FormatDateTimeFormatter formatter = version.onOrAfter(Version.V_2_0_0_beta1) ? dateTimeFormatter : EPOCH_MILLIS_PARSER; - return Long.toString(formatter.parser().parseMillis(timestampAsString)); + return Long.toString(dateTimeFormatter.parser().parseMillis(timestampAsString)); } catch (RuntimeException e) { - if (version.before(Version.V_2_0_0_beta1)) { - try { - return Long.toString(dateTimeFormatter.parser().parseMillis(timestampAsString)); - } catch (RuntimeException e1) { - throw new TimestampParsingException(timestampAsString, e1); - } - } throw new TimestampParsingException(timestampAsString, e); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index d7dddb15984..db6871b0645 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -30,7 +30,7 @@ import org.elasticsearch.cluster.InternalClusterInfoService; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; -import org.elasticsearch.cluster.service.InternalClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.Strings; @@ -41,6 +41,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.loader.SettingsLoader; import org.elasticsearch.common.xcontent.FromXContentBuilder; @@ -50,6 +51,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.gateway.MetaDataStateFormat; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.store.IndexStoreConfig; @@ -139,7 +141,8 @@ public class MetaData implements Iterable, Diffable, Fr } - public static final Setting SETTING_READ_ONLY_SETTING = Setting.boolSetting("cluster.blocks.read_only", false, true, Setting.Scope.CLUSTER); + public static final Setting SETTING_READ_ONLY_SETTING = + Setting.boolSetting("cluster.blocks.read_only", false, Property.Dynamic, Property.NodeScope); public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false, RestStatus.FORBIDDEN, 
EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); @@ -151,6 +154,8 @@ public class MetaData implements Iterable, Diffable, Fr public static final String CONTEXT_MODE_GATEWAY = XContentContext.GATEWAY.toString(); + public static final String GLOBAL_STATE_FILE_PREFIX = "global-"; + private final String clusterUUID; private final long version; @@ -230,7 +235,7 @@ public class MetaData implements Iterable, Diffable, Fr public boolean equalsAliases(MetaData other) { for (ObjectCursor cursor : other.indices().values()) { IndexMetaData otherIndex = cursor.value; - IndexMetaData thisIndex= index(otherIndex.getIndex()); + IndexMetaData thisIndex = index(otherIndex.getIndex()); if (thisIndex == null) { return false; } @@ -368,26 +373,14 @@ public class MetaData implements Iterable, Diffable, Fr /** * Returns all the concrete indices. */ - public String[] concreteAllIndices() { - return allIndices; - } - public String[] getConcreteAllIndices() { - return concreteAllIndices(); - } - - public String[] concreteAllOpenIndices() { - return allOpenIndices; + return allIndices; } public String[] getConcreteAllOpenIndices() { return allOpenIndices; } - public String[] concreteAllClosedIndices() { - return allClosedIndices; - } - public String[] getConcreteAllClosedIndices() { return allClosedIndices; } @@ -455,7 +448,28 @@ public class MetaData implements Iterable, Diffable, Fr } public IndexMetaData index(Index index) { - return index(index.getName()); + IndexMetaData metaData = index(index.getName()); + if (metaData != null && metaData.getIndexUUID().equals(index.getUUID())) { + return metaData; + } + return null; + } + + /** + * Returns the {@link IndexMetaData} for this index. + * @throws IndexNotFoundException if no metadata for this index is found + */ + public IndexMetaData getIndexSafe(Index index) { + IndexMetaData metaData = index(index.getName()); + if (metaData != null) { + if(metaData.getIndexUUID().equals(index.getUUID())) { + return metaData; + } + throw new IndexNotFoundException(index, + new IllegalStateException("index uuid doesn't match expected: [" + index.getUUID() + + "] but got: [" + metaData.getIndexUUID() +"]")); + } + throw new IndexNotFoundException(index); } public ImmutableOpenMap indices() { @@ -486,20 +500,13 @@ public class MetaData implements Iterable, Diffable, Fr return (T) customs.get(type); } - public int totalNumberOfShards() { + + public int getTotalNumberOfShards() { return this.totalNumberOfShards; } - public int getTotalNumberOfShards() { - return totalNumberOfShards(); - } - - public int numberOfShards() { - return this.numberOfShards; - } - public int getNumberOfShards() { - return numberOfShards(); + return this.numberOfShards; } /** @@ -731,7 +738,7 @@ public class MetaData implements Iterable, Diffable, Fr InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(), InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), - InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey())); + ClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey())); /** As of 2.0 we require units for time and byte-sized settings. This methods adds default units to any cluster settings that don't * specify a unit. 
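getIndexSafe above makes index lookup UUID-checked: a name hit with a different UUID means the caller holds a reference to an index that has since been deleted and recreated, and the lookup fails loudly instead of silently resolving to the new incarnation. A simplified sketch of that check, where a plain name-to-UUID map stands in for the metadata map and generic runtime exceptions stand in for IndexNotFoundException:

```java
import java.util.HashMap;
import java.util.Map;

class SafeLookupSketch {
    private final Map<String, String> uuidByName = new HashMap<>();

    void putIndex(String name, String uuid) {
        uuidByName.put(name, uuid);
    }

    // Returns only when both the name and the UUID match the current metadata.
    String getIndexSafe(String name, String expectedUuid) {
        String currentUuid = uuidByName.get(name);
        if (currentUuid != null) {
            if (currentUuid.equals(expectedUuid)) {
                return currentUuid;
            }
            // same name, different incarnation: the index was deleted and recreated
            throw new IllegalStateException("index uuid doesn't match expected: ["
                    + expectedUuid + "] but got: [" + currentUuid + "]");
        }
        throw new IllegalStateException("no such index [" + name + "]");
    }
}
```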
*/ @@ -779,9 +786,9 @@ public class MetaData implements Iterable, Diffable, Fr metaData.getIndices(), metaData.getTemplates(), metaData.getCustoms(), - metaData.concreteAllIndices(), - metaData.concreteAllOpenIndices(), - metaData.concreteAllClosedIndices(), + metaData.getConcreteAllIndices(), + metaData.getConcreteAllOpenIndices(), + metaData.getConcreteAllClosedIndices(), metaData.getAliasAndIndexLookup()); } else { // No changes: @@ -842,6 +849,19 @@ public class MetaData implements Iterable, Diffable, Fr return indices.get(index); } + public IndexMetaData getSafe(Index index) { + IndexMetaData indexMetaData = get(index.getName()); + if (indexMetaData != null) { + if(indexMetaData.getIndexUUID().equals(index.getUUID())) { + return indexMetaData; + } + throw new IndexNotFoundException(index, + new IllegalStateException("index uuid doesn't match expected: [" + index.getUUID() + + "] but got: [" + indexMetaData.getIndexUUID() +"]")); + } + throw new IndexNotFoundException(index); + } + public Builder remove(String index) { indices.remove(index); return this; @@ -1143,4 +1163,28 @@ public class MetaData implements Iterable, Diffable, Fr return PROTO.readFrom(in); } } + + private final static ToXContent.Params FORMAT_PARAMS; + static { + Map params = new HashMap<>(2); + params.put("binary", "true"); + params.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_GATEWAY); + FORMAT_PARAMS = new MapParams(params); + } + + /** + * State format for {@link MetaData} to write to and load from disk + */ + public final static MetaDataStateFormat FORMAT = new MetaDataStateFormat(XContentType.SMILE, GLOBAL_STATE_FILE_PREFIX) { + + @Override + public void toXContent(XContentBuilder builder, MetaData state) throws IOException { + Builder.toXContent(state, builder, FORMAT_PARAMS); + } + + @Override + public MetaData fromXContent(XContentParser parser) throws IOException { + return Builder.fromXContent(parser); + } + }; } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 62f3ad802a0..e0db19cb516 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -28,7 +28,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlock; @@ -39,6 +38,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.ValidationException; @@ -53,6 +53,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.env.Environment; +import 
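Both FORMAT constants above instantiate the same pattern: an anonymous subclass of MetaDataStateFormat that fixes the on-disk file prefix and binds serialization to the corresponding Builder.toXContent/fromXContent methods, with the params (binary, plus the gateway context mode for the global state) baked in as a constant. A stripped-down sketch of that strategy shape, with StateFormatSketch as a stand-in for org.elasticsearch.gateway.MetaDataStateFormat:

```java
import java.io.IOException;

// Abstract strategy owning the on-disk file prefix, with (de)serialization
// supplied by the subclass. The real code writes SMILE XContent; plain strings
// are used here to keep the sketch self-contained.
abstract class StateFormatSketch<T> {
    final String filePrefix;

    StateFormatSketch(String filePrefix) {
        this.filePrefix = filePrefix;
    }

    public abstract String toContent(T state) throws IOException;

    public abstract T fromContent(String content) throws IOException;
}

class GlobalStateSketch {
    // Bound once as a constant, like MetaData.FORMAT and IndexMetaData.FORMAT above.
    static final StateFormatSketch<String> FORMAT = new StateFormatSketch<String>("global-") {
        @Override
        public String toContent(String state) {
            return "state:" + state; // stand-in for Builder.toXContent with FORMAT_PARAMS
        }

        @Override
        public String fromContent(String content) {
            return content.substring("state:".length()); // stand-in for Builder.fromXContent
        }
    };
}
```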
org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.mapper.DocumentMapper; @@ -188,7 +189,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { @Override public ClusterState execute(ClusterState currentState) throws Exception { - boolean indexCreated = false; + Index createdIndex = null; String removalReason = null; try { validate(request, currentState); @@ -308,10 +309,9 @@ public class MetaDataCreateIndexService extends AbstractComponent { // Set up everything, now locally create the index to see that things are ok, and apply final IndexMetaData tmpImd = IndexMetaData.builder(request.index()).settings(actualIndexSettings).build(); // create the index here (on the master) to validate it can be created, as well as adding the mapping - indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList()); - indexCreated = true; + final IndexService indexService = indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList()); + createdIndex = indexService.index(); // now add the mappings - IndexService indexService = indicesService.indexServiceSafe(request.index()); MapperService mapperService = indexService.mapperService(); // first, add the default mapping if (mappings.containsKey(MapperService.DEFAULT_MAPPING)) { @@ -415,9 +415,9 @@ public class MetaDataCreateIndexService extends AbstractComponent { removalReason = "cleaning up after validating index on master"; return updatedState; } finally { - if (indexCreated) { + if (createdIndex != null) { // Index was already partially created - need to clean up - indicesService.removeIndex(request.index(), removalReason != null ? removalReason : "failed to create index"); + indicesService.removeIndex(createdIndex, removalReason != null ? 
removalReason : "failed to create index"); } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java index 54c014fb4ed..5e6d35aacfe 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.action.support.master.MasterNodeRequest; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction; @@ -28,20 +27,22 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.Index; +import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.ThreadPool; -import java.util.Arrays; -import java.util.Collection; +import java.util.Set; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; /** * @@ -67,10 +68,9 @@ public class MetaDataDeleteIndexService extends AbstractComponent { } public void deleteIndices(final Request request, final Listener userListener) { - Collection indices = Arrays.asList(request.indices); final DeleteIndexListener listener = new DeleteIndexListener(userListener); - clusterService.submitStateUpdateTask("delete-index " + indices, new ClusterStateUpdateTask(Priority.URGENT) { + clusterService.submitStateUpdateTask("delete-index " + request.indices, new ClusterStateUpdateTask(Priority.URGENT) { @Override public TimeValue timeout() { @@ -84,20 +84,21 @@ public class MetaDataDeleteIndexService extends AbstractComponent { @Override public ClusterState execute(final ClusterState currentState) { + final MetaData meta = currentState.metaData(); + final Set metaDatas = request.indices.stream().map(i -> meta.getIndexSafe(i)).collect(Collectors.toSet()); + // Check if index deletion conflicts with any running snapshots + SnapshotsService.checkIndexDeletion(currentState, metaDatas); + final Set indices = request.indices; RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable()); - MetaData.Builder metaDataBuilder = MetaData.builder(currentState.metaData()); + MetaData.Builder metaDataBuilder = MetaData.builder(meta); ClusterBlocks.Builder clusterBlocksBuilder = ClusterBlocks.builder().blocks(currentState.blocks()); - for (final String index: indices) { - if (!currentState.metaData().hasConcreteIndex(index)) { - throw new IndexNotFoundException(index); - } - + for (final Index index : indices) { + String indexName = index.getName(); logger.debug("[{}] 
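The rewritten delete task above settles on one ordering: resolve every requested Index to its IndexMetaData first (a stale UUID fails fast inside getIndexSafe), run the snapshot-conflict check over the whole set, and only then strip each index out of the routing table, blocks, and metadata. A skeletal sketch of that resolve-check-apply shape, with generic stand-ins for Index and IndexMetaData:

```java
import java.util.Set;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Collectors;

// I and M are stand-ins for Index and IndexMetaData; the callbacks stand in for
// MetaData#getIndexSafe, SnapshotsService#checkIndexDeletion, and the builder edits.
class DeleteFlowSketch<I, M> {
    void delete(Set<I> requested,
                Function<I, M> getIndexSafe,
                Consumer<Set<M>> checkDeletionConflicts,
                Consumer<M> removeFromState) {
        // analogous to: request.indices.stream().map(meta::getIndexSafe).collect(toSet())
        Set<M> metaDatas = requested.stream().map(getIndexSafe).collect(Collectors.toSet());
        checkDeletionConflicts.accept(metaDatas); // throw before mutating any state
        for (M metaData : metaDatas) {
            removeFromState.accept(metaData);     // routing table, blocks, metadata
        }
    }
}
```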
deleting index", index); - - routingTableBuilder.remove(index); - clusterBlocksBuilder.removeIndexBlocks(index); - metaDataBuilder.remove(index); + routingTableBuilder.remove(indexName); + clusterBlocksBuilder.removeIndexBlocks(indexName); + metaDataBuilder.remove(indexName); } // wait for events from all nodes that it has been removed from their respective metadata... int count = currentState.nodes().size(); @@ -108,7 +109,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent { // this listener will be notified once we get back a notification based on the cluster state change below. final NodeIndexDeletedAction.Listener nodeIndexDeleteListener = new NodeIndexDeletedAction.Listener() { @Override - public void onNodeIndexDeleted(String deleted, String nodeId) { + public void onNodeIndexDeleted(Index deleted, String nodeId) { if (indices.contains(deleted)) { if (counter.decrementAndGet() == 0) { listener.onResponse(new Response(true)); @@ -118,7 +119,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent { } @Override - public void onNodeIndexStoreDeleted(String deleted, String nodeId) { + public void onNodeIndexStoreDeleted(Index deleted, String nodeId) { if (indices.contains(deleted)) { if (counter.decrementAndGet() == 0) { listener.onResponse(new Response(true)); @@ -183,12 +184,12 @@ public class MetaDataDeleteIndexService extends AbstractComponent { public static class Request { - final String[] indices; + final Set indices; TimeValue timeout = TimeValue.timeValueSeconds(10); TimeValue masterTimeout = MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT; - public Request(String[] indices) { + public Request(Set indices) { this.indices = indices; } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java index 52154bd2c04..e39b86a1611 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java @@ -23,14 +23,15 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesClusterStateUpdateRequest; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.NodeServicesProvider; @@ -74,7 +75,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent { @Override public ClusterState execute(final ClusterState currentState) { - List indicesToClose = new ArrayList<>(); + List indicesToClose = new ArrayList<>(); Map indices = new HashMap<>(); try { for (AliasAction aliasAction : request.actions()) { @@ -112,7 +113,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent { logger.warn("[{}] failed to temporary create in order to apply alias action", e, 
indexMetaData.getIndex()); continue; } - indicesToClose.add(indexMetaData.getIndex().getName()); + indicesToClose.add(indexMetaData.getIndex()); } indices.put(indexMetaData.getIndex().getName(), indexService); } @@ -153,7 +154,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent { } return currentState; } finally { - for (String index : indicesToClose) { + for (Index index : indicesToClose) { indicesService.removeIndex(index, "created for alias processing"); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java index 6639f9bdbd6..e6e7084e4d9 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java @@ -19,14 +19,12 @@ package org.elasticsearch.cluster.metadata; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexClusterStateUpdateRequest; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -34,13 +32,17 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.NodeServicesProvider; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.snapshots.RestoreService; +import org.elasticsearch.snapshots.SnapshotsService; import java.util.ArrayList; import java.util.Arrays; @@ -60,10 +62,16 @@ public class MetaDataIndexStateService extends AbstractComponent { private final AllocationService allocationService; private final MetaDataIndexUpgradeService metaDataIndexUpgradeService; + private final NodeServicesProvider nodeServiceProvider; + private final IndicesService indicesService; @Inject - public MetaDataIndexStateService(Settings settings, ClusterService clusterService, AllocationService allocationService, MetaDataIndexUpgradeService metaDataIndexUpgradeService) { + public MetaDataIndexStateService(Settings settings, ClusterService clusterService, AllocationService allocationService, + MetaDataIndexUpgradeService metaDataIndexUpgradeService, + NodeServicesProvider nodeServicesProvider, IndicesService indicesService) { super(settings); + this.nodeServiceProvider = nodeServicesProvider; + this.indicesService = indicesService; this.clusterService = 
clusterService; this.allocationService = allocationService; this.metaDataIndexUpgradeService = metaDataIndexUpgradeService; @@ -83,15 +91,11 @@ public class MetaDataIndexStateService extends AbstractComponent { @Override public ClusterState execute(ClusterState currentState) { - Set indicesToClose = new HashSet<>(); - for (String index : request.indices()) { - IndexMetaData indexMetaData = currentState.metaData().index(index); - if (indexMetaData == null) { - throw new IndexNotFoundException(index); - } - + Set indicesToClose = new HashSet<>(); + for (Index index : request.indices()) { + final IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index); if (indexMetaData.getState() != IndexMetaData.State.CLOSE) { - indicesToClose.add(index); + indicesToClose.add(indexMetaData); } } @@ -99,43 +103,26 @@ public class MetaDataIndexStateService extends AbstractComponent { return currentState; } - // Check if any of the indices to be closed are currently being restored from a snapshot and fail closing if such an index - // is found as closing an index that is being restored makes the index unusable (it cannot be recovered). - RestoreInProgress restore = currentState.custom(RestoreInProgress.TYPE); - if (restore != null) { - Set indicesToFail = null; - for (RestoreInProgress.Entry entry : restore.entries()) { - for (ObjectObjectCursor shard : entry.shards()) { - if (!shard.value.state().completed()) { - if (indicesToClose.contains(shard.key.getIndexName())) { - if (indicesToFail == null) { - indicesToFail = new HashSet<>(); - } - indicesToFail.add(shard.key.getIndexName()); - } - } - } - } - if (indicesToFail != null) { - throw new IllegalArgumentException("Cannot close indices that are being restored: " + indicesToFail); - } - } - + // Check if index closing conflicts with any running restores + RestoreService.checkIndexClosing(currentState, indicesToClose); + // Check if index closing conflicts with any running snapshots + SnapshotsService.checkIndexClosing(currentState, indicesToClose); logger.info("closing indices [{}]", indicesAsString); MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder() .blocks(currentState.blocks()); - for (String index : indicesToClose) { - mdBuilder.put(IndexMetaData.builder(currentState.metaData().index(index)).state(IndexMetaData.State.CLOSE)); - blocksBuilder.addIndexBlock(index, INDEX_CLOSED_BLOCK); + for (IndexMetaData openIndexMetadata : indicesToClose) { + final String indexName = openIndexMetadata.getIndex().getName(); + mdBuilder.put(IndexMetaData.builder(openIndexMetadata).state(IndexMetaData.State.CLOSE)); + blocksBuilder.addIndexBlock(indexName, INDEX_CLOSED_BLOCK); } ClusterState updatedState = ClusterState.builder(currentState).metaData(mdBuilder).blocks(blocksBuilder).build(); RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable()); - for (String index : indicesToClose) { - rtBuilder.remove(index); + for (IndexMetaData index : indicesToClose) { + rtBuilder.remove(index.getIndex().getName()); } RoutingAllocation.Result routingResult = allocationService.reroute( @@ -161,14 +148,11 @@ public class MetaDataIndexStateService extends AbstractComponent { @Override public ClusterState execute(ClusterState currentState) { - List indicesToOpen = new ArrayList<>(); - for (String index : request.indices()) { - IndexMetaData indexMetaData = currentState.metaData().index(index); - if (indexMetaData == null) { - throw new 
IndexNotFoundException(index); - } + List indicesToOpen = new ArrayList<>(); + for (Index index : request.indices()) { + final IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index); if (indexMetaData.getState() != IndexMetaData.State.OPEN) { - indicesToOpen.add(index); + indicesToOpen.add(indexMetaData); } } @@ -181,20 +165,27 @@ public class MetaDataIndexStateService extends AbstractComponent { MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder() .blocks(currentState.blocks()); - for (String index : indicesToOpen) { - IndexMetaData indexMetaData = IndexMetaData.builder(currentState.metaData().index(index)).state(IndexMetaData.State.OPEN).build(); + for (IndexMetaData closedMetaData : indicesToOpen) { + final String indexName = closedMetaData.getIndex().getName(); + IndexMetaData indexMetaData = IndexMetaData.builder(closedMetaData).state(IndexMetaData.State.OPEN).build(); // The index might be closed because we couldn't import it due to old incompatible version // We need to check that this index can be upgraded to the current version indexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(indexMetaData); + try { + indicesService.verifyIndexMetadata(nodeServiceProvider, indexMetaData); + } catch (Exception e) { + throw new ElasticsearchException("Failed to verify index " + indexMetaData.getIndex(), e); + } + mdBuilder.put(indexMetaData, true); - blocksBuilder.removeIndexBlock(index, INDEX_CLOSED_BLOCK); + blocksBuilder.removeIndexBlock(indexName, INDEX_CLOSED_BLOCK); } ClusterState updatedState = ClusterState.builder(currentState).metaData(mdBuilder).blocks(blocksBuilder).build(); RoutingTable.Builder rtBuilder = RoutingTable.builder(updatedState.routingTable()); - for (String index : indicesToOpen) { - rtBuilder.addAsFromCloseToOpen(updatedState.metaData().index(index)); + for (IndexMetaData index : indicesToOpen) { + rtBuilder.addAsFromCloseToOpen(updatedState.metaData().getIndexSafe(index.getIndex())); } RoutingAllocation.Result routingResult = allocationService.reroute( diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java index da2fc064dc4..1206185a609 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java @@ -21,9 +21,9 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.support.master.MasterNodeRequest; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.ValidationException; diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index 8bbd6f09d7e..3101a2c04cc 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -20,15 +20,12 
@@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.misc.IndexMergeTool; import org.elasticsearch.Version; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.IndexScopedSettings; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.MapperService; @@ -36,10 +33,7 @@ import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.mapper.MapperRegistry; import java.util.Collections; -import java.util.Map; -import java.util.Set; -import static java.util.Collections.unmodifiableSet; import static org.elasticsearch.common.util.set.Sets.newHashSet; /** @@ -53,13 +47,13 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet; public class MetaDataIndexUpgradeService extends AbstractComponent { private final MapperRegistry mapperRegistry; - private final IndexScopedSettings indexScopedSettigns; + private final IndexScopedSettings indexScopedSettings; @Inject public MetaDataIndexUpgradeService(Settings settings, MapperRegistry mapperRegistry, IndexScopedSettings indexScopedSettings) { super(settings); this.mapperRegistry = mapperRegistry; - this.indexScopedSettigns = indexScopedSettings; + this.indexScopedSettings = indexScopedSettings; } /** @@ -182,39 +176,13 @@ public class MetaDataIndexUpgradeService extends AbstractComponent { } } - private static final String ARCHIVED_SETTINGS_PREFIX = "archived."; - IndexMetaData archiveBrokenIndexSettings(IndexMetaData indexMetaData) { - Settings settings = indexMetaData.getSettings(); - Settings.Builder builder = Settings.builder(); - boolean changed = false; - for (Map.Entry entry : settings.getAsMap().entrySet()) { - try { - Setting setting = indexScopedSettigns.get(entry.getKey()); - if (setting != null) { - setting.get(settings); - builder.put(entry.getKey(), entry.getValue()); - } else { - if (indexScopedSettigns.isPrivateSetting(entry.getKey()) || entry.getKey().startsWith(ARCHIVED_SETTINGS_PREFIX)) { - builder.put(entry.getKey(), entry.getValue()); - } else { - changed = true; - logger.warn("[{}] found unknown index setting: {} value: {} - archiving", indexMetaData.getIndex(), entry.getKey(), entry.getValue()); - // we put them back in here such that tools can check from the outside if there are any indices with broken settings. The setting can remain there - // but we want users to be aware that some of their setting are broken and they can research why and what they need to do to replace them. - builder.put(ARCHIVED_SETTINGS_PREFIX + entry.getKey(), entry.getValue()); - } - } - } catch (IllegalArgumentException ex) { - changed = true; - logger.warn("[{}] found invalid index setting: {} value: {} - archiving",ex, indexMetaData.getIndex(), entry.getKey(), entry.getValue()); - // we put them back in here such that tools can check from the outside if there are any indices with broken settings. The setting can remain there - // but we want users to be aware that some of their setting sare broken and they can research why and what they need to do to replace them. 
- builder.put(ARCHIVED_SETTINGS_PREFIX + entry.getKey(), entry.getValue()); - } + final Settings settings = indexMetaData.getSettings(); + final Settings upgrade = indexScopedSettings.archiveUnknownOrBrokenSettings(settings); + if (upgrade != settings) { + return IndexMetaData.builder(indexMetaData).settings(upgrade).build(); + } else { + return indexMetaData; } - - return changed ? IndexMetaData.builder(indexMetaData).settings(builder.build()).build() : indexMetaData; } - } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index c06a5cc7c1c..cafdc4581a1 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -23,27 +23,28 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest; import org.elasticsearch.cluster.AckedClusterStateTaskListener; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskConfig; import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.percolator.PercolatorFieldMapper; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidTypeNameException; -import org.elasticsearch.percolator.PercolatorService; import java.io.IOException; import java.util.ArrayList; @@ -112,13 +113,13 @@ public class MetaDataMappingService extends AbstractComponent { MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); for (Map.Entry> entry : tasksPerIndex.entrySet()) { - String index = entry.getKey(); - IndexMetaData indexMetaData = mdBuilder.get(index); + IndexMetaData indexMetaData = mdBuilder.get(entry.getKey()); if (indexMetaData == null) { // index got deleted on us, ignore... 
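The replacement call above, archiveUnknownOrBrokenSettings, keeps the behaviour of the deleted loop: unknown or invalid keys are preserved under an "archived." prefix so operators can still inspect them, and when nothing needs archiving the original Settings instance comes back, which is why the caller can use the identity comparison (upgrade != settings) to skip rebuilding the IndexMetaData. A simplified sketch of the unknown-key case; plain maps stand in for Settings, and the real method also archives keys whose values fail validation:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

class ArchiveSettingsSketch {
    static final String ARCHIVED_PREFIX = "archived.";

    static Map<String, String> archiveUnknown(Map<String, String> settings, Set<String> knownKeys) {
        Map<String, String> result = null; // allocate only if something actually changes
        for (Map.Entry<String, String> entry : settings.entrySet()) {
            if (!knownKeys.contains(entry.getKey()) && !entry.getKey().startsWith(ARCHIVED_PREFIX)) {
                if (result == null) {
                    result = new HashMap<>(settings);
                }
                // keep the value, but move it under the archive prefix
                result.remove(entry.getKey());
                result.put(ARCHIVED_PREFIX + entry.getKey(), entry.getValue());
            }
        }
        // returning the original instance lets callers do an identity check
        return result == null ? settings : result;
    }
}
```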
- logger.debug("[{}] ignoring tasks - index meta data doesn't exist", index); + logger.debug("[{}] ignoring tasks - index meta data doesn't exist", entry.getKey()); continue; } + final Index index = indexMetaData.getIndex(); // the tasks lists to iterate over, filled with the list of mapping tasks, trying to keep // the latest (based on order) update mapping one per node List allIndexTasks = entry.getValue(); @@ -127,7 +128,7 @@ if (indexMetaData.isSameUUID(task.indexUUID)) { hasTaskWithRightUUID = true; } else { - logger.debug("[{}] ignoring task [{}] - index meta data doesn't match task uuid", index, task); + logger.debug("{} ignoring task [{}] - index meta data doesn't match task uuid", index, task); } } if (hasTaskWithRightUUID == false) { @@ -136,7 +137,7 @@ // construct the actual index if needed, and make sure the relevant mappings are there boolean removeIndex = false; - IndexService indexService = indicesService.indexService(index); + IndexService indexService = indicesService.indexService(indexMetaData.getIndex()); if (indexService == null) { // we need to create the index here, and add the current mapping to it, so we can merge indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList()); @@ -208,38 +209,38 @@ class PutMappingExecutor implements ClusterStateTaskExecutor { @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { - Set indicesToClose = new HashSet<>(); + public BatchResult execute(ClusterState currentState, + List tasks) throws Exception { + Set indicesToClose = new HashSet<>(); BatchResult.Builder builder = BatchResult.builder(); try { // precreate incoming indices; for (PutMappingClusterStateUpdateRequest request : tasks) { - // failures here mean something is broken with our cluster state - fail all tasks by letting exceptions bubble up - for (String index : request.indices()) { - final IndexMetaData indexMetaData = currentState.metaData().index(index); - if (indexMetaData != null && indicesService.hasIndex(index) == false) { - // if we don't have the index, we will throw exceptions later; - indicesToClose.add(index); - IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList()); - // add mappings for all types, we need them for cross-type validation - for (ObjectCursor mapping : indexMetaData.getMappings().values()) { - indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes()); + try { + for (Index index : request.indices()) { + final IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index); + if (indicesService.hasIndex(indexMetaData.getIndex()) == false) { + // if the index does not exist we create it once, add all types to the mapper service and + // close it later once we are done with mapping update + indicesToClose.add(indexMetaData.getIndex()); + IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, + Collections.emptyList()); + // add mappings for all types, we need them for cross-type validation + for (ObjectCursor mapping : indexMetaData.getMappings().values()) { + indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), +
MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes()); + } } } - } - } - for (PutMappingClusterStateUpdateRequest request : tasks) { - try { currentState = applyRequest(currentState, request); builder.success(request); } catch (Throwable t) { builder.failure(request, t); } } - return builder.build(currentState); } finally { - for (String index : indicesToClose) { + for (Index index : indicesToClose) { indicesService.removeIndex(index, "created for mapping processing"); } } @@ -248,8 +249,17 @@ public class MetaDataMappingService extends AbstractComponent { private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException { String mappingType = request.type(); CompressedXContent mappingUpdateSource = new CompressedXContent(request.source()); - for (String index : request.indices()) { + final MetaData metaData = currentState.metaData(); + final List> updateList = new ArrayList<>(); + for (Index index : request.indices()) { IndexService indexService = indicesService.indexServiceSafe(index); + // IMPORTANT: always get the metadata from the state since it gets batched + // and if we pull it from the indexService we might miss an update etc. + final IndexMetaData indexMetaData = currentState.getMetaData().getIndexSafe(index); + + // this is paranoia... just to be sure we use the exact same indexService and metadata tuple on the update that + // we used for the validation, it makes this mechanism a little less scary + updateList.add(new Tuple<>(indexService, indexMetaData)); // try and parse it (no need to add it here) so we can bail early in case of parsing exception DocumentMapper newMapper; DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type()); @@ -270,7 +280,6 @@ public class MetaDataMappingService extends AbstractComponent { // and a put mapping api call, so we don't know which type existed before. // Also the order of the mappings may be backwards.
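The paranoia comment above is the heart of the change: metadata is read once from the batched cluster state (not from the IndexService), each (IndexService, IndexMetaData) pair is validated, and the apply phase later iterates the recorded tuples so the merge runs against exactly what was validated. A generic sketch of that validate-then-apply pairing; every type here is a stand-in, with Tuple mimicking org.elasticsearch.common.collect.Tuple:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

class ValidateThenApplySketch<S, M> {

    static final class Tuple<A, B> {
        final A v1;
        final B v2;
        Tuple(A v1, B v2) { this.v1 = v1; this.v2 = v2; }
    }

    interface Step<S, M> {
        void run(S service, M metaData);
    }

    void execute(List<S> services, Function<S, M> metaDataFromClusterState,
                 Step<S, M> validate, Step<S, M> apply) {
        List<Tuple<S, M>> updateList = new ArrayList<>();
        for (S service : services) {
            M metaData = metaDataFromClusterState.apply(service); // from the state, not the service
            validate.run(service, metaData);
            updateList.add(new Tuple<>(service, metaData));       // remember the validated pair
        }
        for (Tuple<S, M> toUpdate : updateList) {
            apply.run(toUpdate.v1, toUpdate.v2);                  // same pair that passed validation
        }
    }
}
```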
if (newMapper.parentFieldMapper().active()) { - IndexMetaData indexMetaData = currentState.metaData().index(index); for (ObjectCursor mapping : indexMetaData.getMappings().values()) { if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) { throw new IllegalArgumentException("can't add a _parent field that points to an already existing type"); @@ -287,17 +296,16 @@ public class MetaDataMappingService extends AbstractComponent { } assert mappingType != null; - if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') { + if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorFieldMapper.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') { throw new InvalidTypeNameException("Document mapping type name can't start with '_'"); } - MetaData.Builder builder = MetaData.builder(currentState.metaData()); - for (String index : request.indices()) { + MetaData.Builder builder = MetaData.builder(metaData); + for (Tuple toUpdate : updateList) { // do the actual merge here on the master, and update the mapping source - IndexService indexService = indicesService.indexService(index); - if (indexService == null) { - continue; - } - + // we use the exact same indexService and metadata we used to validate above here to actually apply the update + final IndexService indexService = toUpdate.v1(); + final IndexMetaData indexMetaData = toUpdate.v2(); + final Index index = indexMetaData.getIndex(); CompressedXContent existingSource = null; DocumentMapper existingMapper = indexService.mapperService().documentMapper(mappingType); if (existingMapper != null) { @@ -312,24 +320,20 @@ public class MetaDataMappingService extends AbstractComponent { } else { // use the merged mapping source if (logger.isDebugEnabled()) { - logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource); + logger.debug("{} update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource); } else if (logger.isInfoEnabled()) { - logger.info("[{}] update_mapping [{}]", index, mergedMapper.type()); + logger.info("{} update_mapping [{}]", index, mergedMapper.type()); } } } else { if (logger.isDebugEnabled()) { - logger.debug("[{}] create_mapping [{}] with source [{}]", index, mappingType, updatedSource); + logger.debug("{} create_mapping [{}] with source [{}]", index, mappingType, updatedSource); } else if (logger.isInfoEnabled()) { - logger.info("[{}] create_mapping [{}]", index, mappingType); + logger.info("{} create_mapping [{}]", index, mappingType); } } - IndexMetaData indexMetaData = currentState.metaData().index(index); - if (indexMetaData == null) { - throw new IndexNotFoundException(index); - } IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData); // Mapping updates on a single type may have side-effects on other types so we need to // update mapping metadata on all types diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java index 2d7ba4c3c05..c925b43056b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java @@ -23,10 +23,8 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import 
org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsClusterStateUpdateRequest; -import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; @@ -35,6 +33,7 @@ import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; @@ -43,7 +42,7 @@ import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.Index; import java.util.ArrayList; import java.util.HashMap; @@ -86,7 +85,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements // we will want to know this for translating "all" to a number final int dataNodeCount = event.state().nodes().dataNodes().size(); - Map> nrReplicasChanged = new HashMap<>(); + Map> nrReplicasChanged = new HashMap<>(); // we need to do this each time in case it was changed by update settings for (final IndexMetaData indexMetaData : event.state().metaData()) { AutoExpandReplicas autoExpandReplicas = IndexMetaData.INDEX_AUTO_EXPAND_REPLICAS_SETTING.get(indexMetaData.getSettings()); @@ -117,7 +116,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements nrReplicasChanged.put(numberOfReplicas, new ArrayList<>()); } - nrReplicasChanged.get(numberOfReplicas).add(indexMetaData.getIndex().getName()); + nrReplicasChanged.get(numberOfReplicas).add(indexMetaData.getIndex()); } } } @@ -126,25 +125,25 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements // update settings and kick of a reroute (implicit) for them to take effect for (final Integer fNumberOfReplicas : nrReplicasChanged.keySet()) { Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, fNumberOfReplicas).build(); - final List indices = nrReplicasChanged.get(fNumberOfReplicas); + final List indices = nrReplicasChanged.get(fNumberOfReplicas); UpdateSettingsClusterStateUpdateRequest updateRequest = new UpdateSettingsClusterStateUpdateRequest() - .indices(indices.toArray(new String[indices.size()])).settings(settings) + .indices(indices.toArray(new Index[indices.size()])).settings(settings) .ackTimeout(TimeValue.timeValueMillis(0)) //no need to wait for ack here .masterNodeTimeout(TimeValue.timeValueMinutes(10)); updateSettings(updateRequest, new ActionListener() { @Override public void onResponse(ClusterStateUpdateResponse response) { - for (String index : indices) { - logger.info("[{}] auto expanded replicas to [{}]", index, fNumberOfReplicas); + for (Index index : indices) { + logger.info("{} auto expanded replicas to [{}]", index, 
fNumberOfReplicas); } } @Override public void onFailure(Throwable t) { - for (String index : indices) { - logger.warn("[{}] fail to auto expand replicas to [{}]", index, fNumberOfReplicas); + for (Index index : indices) { + logger.warn("{} fail to auto expand replicas to [{}]", index, fNumberOfReplicas); } } }); @@ -177,6 +176,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements final Settings skippedSettigns = skipppedSettings.build(); final Settings closedSettings = settingsForClosedIndices.build(); final Settings openSettings = settingsForOpenIndices.build(); + final boolean preserveExisting = request.isPreserveExisting(); clusterService.submitStateUpdateTask("update-settings", new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { @@ -188,16 +188,19 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements @Override public ClusterState execute(ClusterState currentState) { - String[] actualIndices = indexNameExpressionResolver.concreteIndices(currentState, IndicesOptions.strictExpand(), request.indices()); RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable()); MetaData.Builder metaDataBuilder = MetaData.builder(currentState.metaData()); // allow to change any settings to a close index, and only allow dynamic settings to be changed // on an open index - Set openIndices = new HashSet<>(); - Set closeIndices = new HashSet<>(); - for (String index : actualIndices) { - if (currentState.metaData().index(index).getState() == IndexMetaData.State.OPEN) { + Set openIndices = new HashSet<>(); + Set closeIndices = new HashSet<>(); + final String[] actualIndices = new String[request.indices().length]; + for (int i = 0; i < request.indices().length; i++) { + Index index = request.indices()[i]; + actualIndices[i] = index.getName(); + final IndexMetaData metaData = currentState.metaData().getIndexSafe(index); + if (metaData.getState() == IndexMetaData.State.OPEN) { openIndices.add(index); } else { closeIndices.add(index); @@ -206,20 +209,20 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements if (closeIndices.size() > 0 && closedSettings.get(IndexMetaData.SETTING_NUMBER_OF_REPLICAS) != null) { throw new IllegalArgumentException(String.format(Locale.ROOT, - "Can't update [%s] on closed indices [%s] - can leave index in an unopenable state", IndexMetaData.SETTING_NUMBER_OF_REPLICAS, + "Can't update [%s] on closed indices %s - can leave index in an unopenable state", IndexMetaData.SETTING_NUMBER_OF_REPLICAS, closeIndices )); } if (!skippedSettigns.getAsMap().isEmpty() && !openIndices.isEmpty()) { throw new IllegalArgumentException(String.format(Locale.ROOT, - "Can't update non dynamic settings[%s] for open indices [%s]", + "Can't update non dynamic settings [%s] for open indices %s", skippedSettigns.getAsMap().keySet(), openIndices )); } int updatedNumberOfReplicas = openSettings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, -1); - if (updatedNumberOfReplicas != -1) { + if (updatedNumberOfReplicas != -1 && preserveExisting == false) { routingTableBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices); metaDataBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices); logger.info("updating number_of_replicas to [{}] for indices {}", updatedNumberOfReplicas, actualIndices); @@ -232,28 +235,28 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements maybeUpdateClusterBlock(actualIndices, blocks, 
IndexMetaData.INDEX_READ_BLOCK, IndexMetaData.INDEX_BLOCKS_READ_SETTING, openSettings); if (!openIndices.isEmpty()) { - for (String index : openIndices) { - IndexMetaData indexMetaData = metaDataBuilder.get(index); - if (indexMetaData == null) { - throw new IndexNotFoundException(index); - } + for (Index index : openIndices) { + IndexMetaData indexMetaData = metaDataBuilder.getSafe(index); Settings.Builder updates = Settings.builder(); Settings.Builder indexSettings = Settings.builder().put(indexMetaData.getSettings()); - if (indexScopedSettings.updateDynamicSettings(openSettings, indexSettings, updates, index)) { + if (indexScopedSettings.updateDynamicSettings(openSettings, indexSettings, updates, index.getName())) { + if (preserveExisting) { + indexSettings.put(indexMetaData.getSettings()); + } metaDataBuilder.put(IndexMetaData.builder(indexMetaData).settings(indexSettings)); } } } if (!closeIndices.isEmpty()) { - for (String index : closeIndices) { - IndexMetaData indexMetaData = metaDataBuilder.get(index); - if (indexMetaData == null) { - throw new IndexNotFoundException(index); - } + for (Index index : closeIndices) { + IndexMetaData indexMetaData = metaDataBuilder.getSafe(index); Settings.Builder updates = Settings.builder(); Settings.Builder indexSettings = Settings.builder().put(indexMetaData.getSettings()); - if (indexScopedSettings.updateSettings(closedSettings, indexSettings, updates, index)) { + if (indexScopedSettings.updateSettings(closedSettings, indexSettings, updates, index.getName())) { + if (preserveExisting) { + indexSettings.put(indexMetaData.getSettings()); + } metaDataBuilder.put(IndexMetaData.builder(indexMetaData).settings(indexSettings)); } } @@ -265,11 +268,11 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements // now, reroute in case things change that require it (like number of replicas) RoutingAllocation.Result routingResult = allocationService.reroute(updatedState, "settings update"); updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build(); - for (String index : openIndices) { - indexScopedSettings.dryRun(updatedState.metaData().index(index).getSettings()); + for (Index index : openIndices) { + indexScopedSettings.dryRun(updatedState.metaData().getIndexSafe(index).getSettings()); } - for (String index : closeIndices) { - indexScopedSettings.dryRun(updatedState.metaData().index(index).getSettings()); + for (Index index : closeIndices) { + indexScopedSettings.dryRun(updatedState.metaData().getIndexSafe(index).getSettings()); } return updatedState; } diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java index 83f603d2890..ccd30a99e9c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeService.java @@ -19,24 +19,41 @@ package org.elasticsearch.cluster.node; +import org.elasticsearch.Version; +import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; import java.util.HashMap; import java.util.List; import java.util.Map; +import 
java.util.Random; import java.util.concurrent.CopyOnWriteArrayList; /** */ public class DiscoveryNodeService extends AbstractComponent { + public static final Setting NODE_ID_SEED_SETTING = + // don't use node.id.seed so it won't be seen as an attribute + Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, Property.NodeScope); private final List customAttributesProviders = new CopyOnWriteArrayList<>(); + private final Version version; @Inject - public DiscoveryNodeService(Settings settings) { + public DiscoveryNodeService(Settings settings, Version version) { super(settings); + this.version = version; + } + + public static String generateNodeId(Settings settings) { + Random random = Randomness.get(settings, NODE_ID_SEED_SETTING); + return Strings.randomBase64UUID(random); } public DiscoveryNodeService addCustomAttributeProvider(CustomAttributesProvider customAttributesProvider) { @@ -44,7 +61,7 @@ public class DiscoveryNodeService extends AbstractComponent { return this; } - public Map buildAttributes() { + public DiscoveryNode buildLocalNode(TransportAddress publishAddress) { Map attributes = new HashMap<>(settings.getByPrefix("node.").getAsMap()); attributes.remove("name"); // name is extracted in other places if (attributes.containsKey("client")) { @@ -76,10 +93,11 @@ public class DiscoveryNodeService extends AbstractComponent { } } - return attributes; + final String nodeId = generateNodeId(settings); + return new DiscoveryNode(settings.get("node.name"), nodeId, publishAddress, attributes, version); } - public static interface CustomAttributesProvider { + public interface CustomAttributesProvider { Map buildAttributes(); } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index 160ccbf06b3..c32d9de363d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -313,7 +313,7 @@ public class IndexRoutingTable extends AbstractDiffable imple @Override public IndexRoutingTable readFrom(StreamInput in) throws IOException { - Index index = Index.readIndex(in); + Index index = new Index(in); Builder builder = new Builder(index); int size = in.readVInt(); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index bda0a24c9a4..e64f8f5d77c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -584,7 +584,7 @@ public class IndexShardRoutingTable implements Iterable { } public static IndexShardRoutingTable readFrom(StreamInput in) throws IOException { - Index index = Index.readIndex(in); + Index index = new Index(in); return readFromThin(in, index); } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index 4f2f9d06097..a6ef564904c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -597,6 +597,13 @@ public class RoutingNodes implements Iterable { } + /** + * Returns the number of routing nodes + */ + public int size() { + return nodesToShards.size(); + } + public static final class UnassignedShards 
implements Iterable { private final RoutingNodes nodes; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java index c683f0200dc..90565a6569d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java @@ -20,12 +20,12 @@ package org.elasticsearch.cluster.routing; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index a34405c09e0..c27e0a9beb1 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -586,10 +586,6 @@ public class RoutingTable implements Iterable, Diffable indexRoutingTable : indicesRouting.values()) { - indicesRouting.put(indexRoutingTable.value.getIndex().getName(), indexRoutingTable.value); - } RoutingTable table = new RoutingTable(version, indicesRouting.build()); indicesRouting = null; return table; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index cfa33e4f225..269a238e710 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -266,7 +266,7 @@ public final class ShardRouting implements Streamable, ToXContent { return false; } - if (indexMetaData.activeAllocationIds(id()).isEmpty() && indexMetaData.getCreationVersion().onOrAfter(Version.V_5_0_0)) { + if (indexMetaData.activeAllocationIds(id()).isEmpty() && indexMetaData.getCreationVersion().onOrAfter(Version.V_5_0_0_alpha1)) { // when no shards with this id have ever been active for this index return false; } @@ -328,7 +328,7 @@ public final class ShardRouting implements Streamable, ToXContent { @Override public void readFrom(StreamInput in) throws IOException { - readFrom(in, Index.readIndex(in), in.readVInt()); + readFrom(in, new Index(in), in.readVInt()); } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java b/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java index 714c1e4913a..be7d90a1fef 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; @@ -44,7 +45,9 @@ public class UnassignedInfo implements ToXContent, Writeable { public static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern("dateOptionalTime"); private static final TimeValue DEFAULT_DELAYED_NODE_LEFT_TIMEOUT = TimeValue.timeValueMinutes(1); - public static final Setting INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING = Setting.timeSetting("index.unassigned.node_left.delayed_timeout", DEFAULT_DELAYED_NODE_LEFT_TIMEOUT, true, Setting.Scope.INDEX); + public static final Setting INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING = + Setting.timeSetting("index.unassigned.node_left.delayed_timeout", DEFAULT_DELAYED_NODE_LEFT_TIMEOUT, Property.Dynamic, + Property.IndexScope); /** * Reason why the shard is in unassigned state. diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index eeeb6e3389c..da0fea69c68 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation; import com.carrotsearch.hppc.cursors.ObjectCursor; -import org.apache.lucene.util.ArrayUtil; import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -36,13 +35,14 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; -import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators; +import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; -import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.gateway.GatewayAllocator; +import org.elasticsearch.index.shard.ShardId; import java.util.ArrayList; import java.util.Collections; @@ -63,14 +63,17 @@ import java.util.stream.Collectors; public class AllocationService extends AbstractComponent { private final AllocationDeciders allocationDeciders; + private final GatewayAllocator gatewayAllocator; + private final ShardsAllocator shardsAllocator; private final ClusterInfoService clusterInfoService; - private final ShardsAllocators shardsAllocators; @Inject - public AllocationService(Settings settings, AllocationDeciders allocationDeciders, ShardsAllocators shardsAllocators, ClusterInfoService clusterInfoService) { + public AllocationService(Settings settings, AllocationDeciders allocationDeciders, GatewayAllocator gatewayAllocator, + ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService) { super(settings); this.allocationDeciders = allocationDeciders; - this.shardsAllocators = shardsAllocators; + this.gatewayAllocator = gatewayAllocator; + this.shardsAllocator = shardsAllocator; this.clusterInfoService = clusterInfoService; } @@ -92,11 +95,11 @@ public class 
AllocationService extends AbstractComponent { if (!changed) { return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData()); } - shardsAllocators.applyStartedShards(allocation); + gatewayAllocator.applyStartedShards(allocation); if (withReroute) { reroute(allocation); } - final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes); + final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes); String startedShardsAsString = firstListElementsToCommaDelimitedString(startedShards, s -> s.shardId().toString()); logClusterHealthStateChange( @@ -105,37 +108,44 @@ public class AllocationService extends AbstractComponent { "shards started [" + startedShardsAsString + "] ..." ); return result; - } - - - protected RoutingAllocation.Result buildChangedResult(MetaData metaData, RoutingNodes routingNodes) { - return buildChangedResult(metaData, routingNodes, new RoutingExplanations()); } - protected RoutingAllocation.Result buildChangedResult(MetaData metaData, RoutingNodes routingNodes, RoutingExplanations explanations) { - final RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build(); - MetaData newMetaData = updateMetaDataWithRoutingTable(metaData,routingTable); - return new RoutingAllocation.Result(true, routingTable.validateRaiseException(newMetaData), newMetaData, explanations); + + protected RoutingAllocation.Result buildChangedResult(MetaData oldMetaData, RoutingTable oldRoutingTable, RoutingNodes newRoutingNodes) { + return buildChangedResult(oldMetaData, oldRoutingTable, newRoutingNodes, new RoutingExplanations()); + + } + + protected RoutingAllocation.Result buildChangedResult(MetaData oldMetaData, RoutingTable oldRoutingTable, RoutingNodes newRoutingNodes, + RoutingExplanations explanations) { + final RoutingTable newRoutingTable = new RoutingTable.Builder().updateNodes(newRoutingNodes).build(); + MetaData newMetaData = updateMetaDataWithRoutingTable(oldMetaData, oldRoutingTable, newRoutingTable); + return new RoutingAllocation.Result(true, newRoutingTable.validateRaiseException(newMetaData), newMetaData, explanations); } /** - * Updates the current {@link MetaData} based on the newly created {@link RoutingTable}. + * Updates the current {@link MetaData} based on the newly created {@link RoutingTable}. Specifically + * we update {@link IndexMetaData#getActiveAllocationIds()} and {@link IndexMetaData#primaryTerm(int)} based on + * the changes made during this allocation. * - * @param currentMetaData {@link MetaData} object from before the routing table was changed. + * @param oldMetaData {@link MetaData} object from before the routing table was changed. + * @param oldRoutingTable {@link RoutingTable} from before the change. * @param newRoutingTable new {@link RoutingTable} created by the allocation change * @return adapted {@link MetaData}, potentially the original one if no change was needed. 
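The contract described in this javadoc has two halves, and the allocation-id half is a pure stream computation that is easy to show in isolation. Here is a minimal, self-contained sketch of that pipeline; Shard is a hypothetical stand-in for ShardRouting, not the real class:

import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;

public class ActiveAllocationIdsSketch {
    // Hypothetical stand-in for ShardRouting: only the fields this example needs.
    record Shard(String allocationId, boolean active) {}

    // Mirrors the patch: keep active shards, drop null allocation ids, collect the rest into a set.
    static Set<String> activeAllocationIds(List<Shard> shards) {
        return shards.stream()
                .filter(Shard::active)
                .map(Shard::allocationId)
                .filter(Objects::nonNull)
                .collect(Collectors.toSet());
    }

    public static void main(String[] args) {
        List<Shard> shards = List.of(
                new Shard("a1", true),
                new Shard(null, true),    // active but no allocation id yet
                new Shard("a2", false));  // inactive shards are ignored
        System.out.println(activeAllocationIds(shards)); // [a1]
    }
}

As in the patch, the stored metadata is only rewritten when the computed set is non-empty and differs from what is already stored, so no-op allocation rounds do not produce new cluster states.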
*/ - static MetaData updateMetaDataWithRoutingTable(MetaData currentMetaData, RoutingTable newRoutingTable) { - // make sure index meta data and routing tables are in sync w.r.t active allocation ids + static MetaData updateMetaDataWithRoutingTable(MetaData oldMetaData, RoutingTable oldRoutingTable, RoutingTable newRoutingTable) { MetaData.Builder metaDataBuilder = null; - for (IndexRoutingTable indexRoutingTable : newRoutingTable) { - final IndexMetaData indexMetaData = currentMetaData.index(indexRoutingTable.getIndex()); - if (indexMetaData == null) { - throw new IllegalStateException("no metadata found for index " + indexRoutingTable.getIndex().getName()); + for (IndexRoutingTable newIndexTable : newRoutingTable) { + final IndexMetaData oldIndexMetaData = oldMetaData.index(newIndexTable.getIndex()); + if (oldIndexMetaData == null) { + throw new IllegalStateException("no metadata found for index " + newIndexTable.getIndex().getName()); } IndexMetaData.Builder indexMetaDataBuilder = null; - for (IndexShardRoutingTable shardRoutings : indexRoutingTable) { - Set activeAllocationIds = shardRoutings.activeShards().stream() + for (IndexShardRoutingTable newShardTable : newIndexTable) { + final ShardId shardId = newShardTable.shardId(); + + // update activeAllocationIds + Set activeAllocationIds = newShardTable.activeShards().stream() .map(ShardRouting::allocationId) .filter(Objects::nonNull) .map(AllocationId::getId) @@ -143,19 +153,44 @@ public class AllocationService extends AbstractComponent { // only update active allocation ids if there is an active shard if (activeAllocationIds.isEmpty() == false) { // get currently stored allocation ids - Set storedAllocationIds = indexMetaData.activeAllocationIds(shardRoutings.shardId().id()); + Set storedAllocationIds = oldIndexMetaData.activeAllocationIds(shardId.id()); if (activeAllocationIds.equals(storedAllocationIds) == false) { if (indexMetaDataBuilder == null) { - indexMetaDataBuilder = IndexMetaData.builder(indexMetaData); + indexMetaDataBuilder = IndexMetaData.builder(oldIndexMetaData); } - - indexMetaDataBuilder.putActiveAllocationIds(shardRoutings.shardId().id(), activeAllocationIds); + indexMetaDataBuilder.putActiveAllocationIds(shardId.id(), activeAllocationIds); } } + + // update primary terms + final ShardRouting newPrimary = newShardTable.primaryShard(); + if (newPrimary == null) { + throw new IllegalStateException("missing primary shard for " + newShardTable.shardId()); + } + final ShardRouting oldPrimary = oldRoutingTable.shardRoutingTable(shardId).primaryShard(); + if (oldPrimary == null) { + throw new IllegalStateException("missing primary shard for " + newShardTable.shardId()); + } + // we update the primary term on initial assignment or when a replica is promoted. Most notably we do *not* + // update them when a primary relocates + if (newPrimary.unassigned() || + newPrimary.isSameAllocation(oldPrimary) || + // we do not use newPrimary.isTargetRelocationOf(oldPrimary) because that one enforces newPrimary to + // be initializing. 
However, when the target shard is activated, we still want the primary term to stay + the same + (oldPrimary.relocating() && newPrimary.isSameAllocation(oldPrimary.buildTargetRelocatingShard()))) { + // do nothing + } else { + // incrementing the primary term + if (indexMetaDataBuilder == null) { + indexMetaDataBuilder = IndexMetaData.builder(oldIndexMetaData); + } + indexMetaDataBuilder.primaryTerm(shardId.id(), oldIndexMetaData.primaryTerm(shardId.id()) + 1); + } } if (indexMetaDataBuilder != null) { if (metaDataBuilder == null) { - metaDataBuilder = MetaData.builder(currentMetaData); + metaDataBuilder = MetaData.builder(oldMetaData); } metaDataBuilder.put(indexMetaDataBuilder); } @@ -163,7 +198,7 @@ if (metaDataBuilder != null) { return metaDataBuilder.build(); } else { - return currentMetaData; + return oldMetaData; } } @@ -192,9 +227,9 @@ public class AllocationService extends AbstractComponent { if (!changed) { return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData()); } - shardsAllocators.applyFailedShards(allocation); + gatewayAllocator.applyFailedShards(allocation); reroute(allocation); - final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes); + final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes); String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.shard.shardId().toString()); logClusterHealthStateChange( new ClusterStateHealth(clusterState), @@ -241,7 +276,7 @@ public class AllocationService extends AbstractComponent { // the assumption is that commands will move / act on shards (or fail through exceptions) // so, there will always be shard "movements", so no need to check on reroute reroute(allocation); - RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes, explanations); + RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes, explanations); logClusterHealthStateChange( new ClusterStateHealth(clusterState), new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()), @@ -250,6 +285,7 @@ return result; } + /** * Reroutes the routing table based on the live nodes. *

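The primary-term rule just introduced deserves a concrete restatement: the term is incremented when a primary is first assigned or a replica is promoted, and deliberately left alone when a primary merely relocates. A minimal sketch of that decision follows; PrimaryState is a hypothetical stand-in rather than the real ShardRouting API, and the relocation check is simplified to comparing allocation ids:

public class PrimaryTermSketch {
    // Hypothetical stand-in for the bits of ShardRouting the rule needs.
    record PrimaryState(String allocationId, boolean unassigned, boolean relocating, String relocationTargetId) {}

    /** Returns true if the shard's primary term should be bumped for this round. */
    static boolean shouldBumpPrimaryTerm(PrimaryState oldPrimary, PrimaryState newPrimary) {
        if (newPrimary.unassigned()) {
            return false; // nothing was assigned or promoted
        }
        if (newPrimary.allocationId().equals(oldPrimary.allocationId())) {
            return false; // same allocation as before: no ownership change
        }
        if (oldPrimary.relocating() && newPrimary.allocationId().equals(oldPrimary.relocationTargetId())) {
            return false; // completed relocation: same logical primary on a new node, same term
        }
        return true; // initial assignment or replica promotion
    }

    public static void main(String[] args) {
        PrimaryState old = new PrimaryState("p1", false, true, "p1-target");
        System.out.println(shouldBumpPrimaryTerm(old, new PrimaryState("p1-target", false, false, null))); // false
        System.out.println(shouldBumpPrimaryTerm(old, new PrimaryState("r7", false, false, null)));        // true
    }
}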
@@ -273,7 +309,7 @@ public class AllocationService extends AbstractComponent { if (!reroute(allocation)) { return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData()); } - RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes); + RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes); logClusterHealthStateChange( new ClusterStateHealth(clusterState), new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()), @@ -306,14 +342,10 @@ public class AllocationService extends AbstractComponent { if (allocation.routingNodes().unassigned().size() > 0) { updateLeftDelayOfUnassignedShards(allocation, settings); - changed |= shardsAllocators.allocateUnassigned(allocation); + changed |= gatewayAllocator.allocateUnassigned(allocation); } - // move shards that no longer can be allocated - changed |= shardsAllocators.moveShards(allocation); - - // rebalance - changed |= shardsAllocators.rebalance(allocation); + changed |= shardsAllocator.allocate(allocation); assert RoutingNodes.assertShardStats(allocation.routingNodes()); return changed; } @@ -322,7 +354,7 @@ public class AllocationService extends AbstractComponent { public static void updateLeftDelayOfUnassignedShards(RoutingAllocation allocation, Settings settings) { for (ShardRouting shardRouting : allocation.routingNodes().unassigned()) { final MetaData metaData = allocation.metaData(); - final IndexMetaData indexMetaData = metaData.index(shardRouting.index()); + final IndexMetaData indexMetaData = metaData.getIndexSafe(shardRouting.index()); shardRouting.unassignedInfo().updateDelay(allocation.getCurrentNanoTime(), settings, indexMetaData.getSettings()); } } @@ -342,7 +374,6 @@ public class AllocationService extends AbstractComponent { changed |= failReplicasForUnassignedPrimary(allocation, shardEntry); ShardRouting candidate = allocation.routingNodes().activeReplica(shardEntry); if (candidate != null) { - IndexMetaData index = allocation.metaData().index(candidate.index()); routingNodes.swapPrimaryFlag(shardEntry, candidate); if (candidate.relocatingNodeId() != null) { changed = true; @@ -357,6 +388,7 @@ public class AllocationService extends AbstractComponent { } } } + IndexMetaData index = allocation.metaData().getIndexSafe(candidate.index()); if (IndexMetaData.isIndexUsingShadowReplicas(index.getSettings())) { routingNodes.reinitShadowPrimary(candidate); changed = true; @@ -414,8 +446,8 @@ public class AllocationService extends AbstractComponent { boolean changed = false; for (ShardRouting routing : replicas) { changed |= applyFailedShard(allocation, routing, false, - new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "primary failed while replica initializing", - null, allocation.getCurrentNanoTime(), System.currentTimeMillis())); + new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "primary failed while replica initializing", + null, allocation.getCurrentNanoTime(), System.currentTimeMillis())); } return changed; } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java index 4e6ba0fb5ad..536806c0830 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java @@ -44,7 +44,7 @@ import static 
java.util.Collections.unmodifiableSet; public class RoutingAllocation { /** - * this class is used to describe results of a {@link RoutingAllocation} + * this class is used to describe results of a {@link RoutingAllocation} */ public static class Result { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 0c40b26ca67..8102f206799 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.routing.allocation.allocator; -import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.IntroSorter; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -28,9 +27,7 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; @@ -39,21 +36,18 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.PriorityComparator; -import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.IdentityHashMap; import java.util.Iterator; -import java.util.List; import java.util.Map; import java.util.Set; -import java.util.function.Predicate; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; @@ -74,9 +68,13 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; */ public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator { - public static final Setting INDEX_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, true, Setting.Scope.CLUSTER); - public static final Setting SHARD_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, true, Setting.Scope.CLUSTER); - public static final Setting THRESHOLD_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.threshold", 1.0f, 0.0f, true, Setting.Scope.CLUSTER); + public static final Setting INDEX_BALANCE_FACTOR_SETTING = + Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, Property.Dynamic, Property.NodeScope); + public static final Setting SHARD_BALANCE_FACTOR_SETTING = + Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, Property.Dynamic, Property.NodeScope); + public static 
final Setting THRESHOLD_SETTING = + Setting.floatSetting("cluster.routing.allocation.balance.threshold", 1.0f, 0.0f, + Property.Dynamic, Property.NodeScope); private volatile WeightFunction weightFunction; private volatile float threshold; @@ -103,27 +101,16 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards } @Override - public void applyStartedShards(StartedRerouteAllocation allocation) { /* ONLY FOR GATEWAYS */ } - - @Override - public void applyFailedShards(FailedRerouteAllocation allocation) { /* ONLY FOR GATEWAYS */ } - - @Override - public boolean allocateUnassigned(RoutingAllocation allocation) { + public boolean allocate(RoutingAllocation allocation) { + if (allocation.routingNodes().size() == 0) { + /* with no nodes this is pointless */ + return false; + } final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold); - return balancer.allocateUnassigned(); - } - - @Override - public boolean rebalance(RoutingAllocation allocation) { - final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold); - return balancer.balance(); - } - - @Override - public boolean moveShards(RoutingAllocation allocation) { - final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold); - return balancer.moveShards(); + boolean changed = balancer.allocateUnassigned(); + changed |= balancer.moveShards(); + changed |= balancer.balance(); + return changed; } /** @@ -203,8 +190,8 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards } private float weight(Balancer balancer, ModelNode node, String index, int numAdditionalShards) { - final float weightShard = (node.numShards() + numAdditionalShards - balancer.avgShardsPerNode()); - final float weightIndex = (node.numShards(index) + numAdditionalShards - balancer.avgShardsPerNode(index)); + final float weightShard = node.numShards() + numAdditionalShards - balancer.avgShardsPerNode(); + final float weightIndex = node.numShards(index) + numAdditionalShards - balancer.avgShardsPerNode(index); return theta0 * weightShard + theta1 * weightIndex; } @@ -216,7 +203,6 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards public static class Balancer { private final ESLogger logger; private final Map nodes = new HashMap<>(); - private final HashSet indices = new HashSet<>(); private final RoutingAllocation allocation; private final RoutingNodes routingNodes; private final WeightFunction weight; @@ -225,19 +211,15 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards private final MetaData metaData; private final float avgShardsPerNode; - private final Predicate assignedFilter = shard -> shard.assignedToNode(); - public Balancer(ESLogger logger, RoutingAllocation allocation, WeightFunction weight, float threshold) { this.logger = logger; this.allocation = allocation; this.weight = weight; this.threshold = threshold; this.routingNodes = allocation.routingNodes(); - for (RoutingNode node : routingNodes) { - nodes.put(node.nodeId(), new ModelNode(node.nodeId())); - } metaData = routingNodes.metaData(); - avgShardsPerNode = ((float) metaData.totalNumberOfShards()) / nodes.size(); + avgShardsPerNode = ((float) metaData.getTotalNumberOfShards()) / routingNodes.size(); + buildModelFromAssigned(); } /** @@ -271,17 +253,6 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards return new NodeSorter(nodesArray(), weight, this); } - private boolean 
initialize(RoutingNodes routing, RoutingNodes.UnassignedShards unassigned) { - if (logger.isTraceEnabled()) { - logger.trace("Start distributing Shards"); - } - for (ObjectCursor<String> index : allocation.routingTable().indicesRouting().keys()) { - indices.add(index.value); - } - buildModelFromAssigned(routing.shards(assignedFilter)); - return allocateUnassigned(unassigned); - } - private static float absDelta(float lower, float higher) { assert higher >= lower : higher + " lt " + lower +" but was expected to be gte"; return Math.abs(higher - lower); @@ -295,12 +266,36 @@ } /** - * Allocates all possible unassigned shards + * Balances the nodes on the cluster model according to the weight function. + * The actual balancing is delegated to {@link #balanceByWeights()} + * * @return true if the current configuration has been * changed, otherwise false */ - final boolean allocateUnassigned() { - return balance(true); + private boolean balance() { + if (logger.isTraceEnabled()) { + logger.trace("Start balancing cluster"); + } + if (allocation.hasPendingAsyncFetch()) { + /* + * see https://github.com/elastic/elasticsearch/issues/14387 + * if we allow rebalance operations while we are still fetching shard store data + * we might end up with unnecessary rebalance operations which can be super confusing/frustrating + * since once the fetches come back we might just move all the shards back again. + * Therefore we only do a rebalance if we have fetched all information. + */ + logger.debug("skipping rebalance due to in-flight shard/store fetches"); + return false; + } + if (allocation.deciders().canRebalance(allocation).type() != Type.YES) { + logger.trace("skipping rebalance as it is disabled"); + return false; + } + if (nodes.size() < 2) { /* skip if we only have one node */ + logger.trace("skipping rebalance as single node only"); + return false; + } + return balanceByWeights(); } /** @@ -317,120 +312,100 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards * @return true if the current configuration has been * changed, otherwise false */ - public boolean balance() { - return balance(false); - } + private boolean balanceByWeights() { + boolean changed = false; + final NodeSorter sorter = newNodeSorter(); + final AllocationDeciders deciders = allocation.deciders(); + final ModelNode[] modelNodes = sorter.modelNodes; + final float[] weights = sorter.weights; + for (String index : buildWeightOrderedIndices(sorter)) { + IndexMetaData indexMetaData = metaData.index(index); - private boolean balance(boolean onlyAssign) { - if (this.nodes.isEmpty()) { - /* with no nodes this is pointless */ - return false; - } - if (logger.isTraceEnabled()) { - if (onlyAssign) { - logger.trace("Start balancing cluster"); - } else { - logger.trace("Start assigning unassigned shards"); + // find nodes that have a shard of this index or where shards of this index are allowed to stay + // move these nodes to the front of modelNodes so that we can only balance based on these nodes + int relevantNodes = 0; + for (int i = 0; i < modelNodes.length; i++) { + ModelNode modelNode = modelNodes[i]; + if (modelNode.getIndex(index) != null + || deciders.canAllocate(indexMetaData, modelNode.getRoutingNode(), allocation).type() != Type.NO) { + // swap nodes at position i and relevantNodes + modelNodes[i] = modelNodes[relevantNodes]; + modelNodes[relevantNodes] = modelNode; + relevantNodes++; + } } - } - final
RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned(); - boolean changed = initialize(routingNodes, unassigned); - if (onlyAssign == false && changed == false && allocation.deciders().canRebalance(allocation).type() == Type.YES) { - NodeSorter sorter = newNodeSorter(); - if (nodes.size() > 1) { /* skip if we only have one node */ - AllocationDeciders deciders = allocation.deciders(); - final ModelNode[] modelNodes = sorter.modelNodes; - final float[] weights = sorter.weights; - for (String index : buildWeightOrderedIndices(sorter)) { - IndexMetaData indexMetaData = metaData.index(index); - // find nodes that have a shard of this index or where shards of this index are allowed to stay - // move these nodes to the front of modelNodes so that we can only balance based on these nodes - int relevantNodes = 0; - for (int i = 0; i < modelNodes.length; i++) { - ModelNode modelNode = modelNodes[i]; - if (modelNode.getIndex(index) != null - || deciders.canAllocate(indexMetaData, modelNode.getRoutingNode(routingNodes), allocation).type() != Type.NO) { - // swap nodes at position i and relevantNodes - modelNodes[i] = modelNodes[relevantNodes]; - modelNodes[relevantNodes] = modelNode; - relevantNodes++; + if (relevantNodes < 2) { + continue; + } + + sorter.reset(index, 0, relevantNodes); + int lowIdx = 0; + int highIdx = relevantNodes - 1; + while (true) { + final ModelNode minNode = modelNodes[lowIdx]; + final ModelNode maxNode = modelNodes[highIdx]; + advance_range: + if (maxNode.numShards(index) > 0) { + final float delta = absDelta(weights[lowIdx], weights[highIdx]); + if (lessThan(delta, threshold)) { + if (lowIdx > 0 && highIdx-1 > 0 // is there a chance for a higher delta? + && (absDelta(weights[0], weights[highIdx-1]) > threshold) // check if we need to break at all + ) { + /* This is a special case if allocations from the "heaviest" to the "lighter" nodes are not possible + * due to some allocation decider restrictions like zone awareness. if one zone has for instance + * fewer nodes than another zone, so one zone is horribly overloaded from a balanced perspective but we + * can't move shards to the "lighter" nodes since otherwise the zone would go over capacity. + * + * This break jumps straight to the condition below where we start moving from the high index towards + * the low index to shrink the window we are considering for balance from the other direction. + * (check shrinking the window from MAX to MIN) + * See #3580 + */ + break advance_range; } + if (logger.isTraceEnabled()) { + logger.trace("Stop balancing index [{}] min_node [{}] weight: [{}] max_node [{}] weight: [{}] delta: [{}]", + index, maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), weights[lowIdx], delta); + } + break; } - - if (relevantNodes < 2) { + if (logger.isTraceEnabled()) { + logger.trace("Balancing from node [{}] weight: [{}] to node [{}] weight: [{}] delta: [{}]", + maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), weights[lowIdx], delta); + } + /* pass the delta to the replication function to prevent relocations that only swap the weights of the two nodes.
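The min/max window driving this loop is easier to see stripped of deciders and sorting infrastructure. Below is a self-contained model of just the two-pointer mechanics: try to shift load from the heaviest relevant node to the lightest, and shrink the window from whichever end cannot make progress. All names here are hypothetical; tryMove stands in for the decider-guarded tryRelocateShard, and STEP is an invented unit of shard weight:

import java.util.Arrays;

public class BalanceWindowSketch {
    static final float STEP = 1.0f; // invented: weight contributed by one shard

    /** Placeholder for the decider-guarded relocation attempt in the real balancer. */
    static boolean tryMove(float[] weights, int from, int to, float threshold) {
        float delta = weights[from] - weights[to];
        // only move when the imbalance exceeds the threshold AND the move strictly shrinks it;
        // this mirrors the delta check that stops relocations which merely swap two weights
        if (delta <= threshold || delta <= 2 * STEP) {
            return false;
        }
        weights[from] -= STEP;
        weights[to] += STEP;
        return true;
    }

    static void balance(float[] weights, float threshold) {
        Arrays.sort(weights); // lightest node first, like the NodeSorter
        int low = 0, high = weights.length - 1;
        while (true) {
            if (tryMove(weights, high, low, threshold)) {
                Arrays.sort(weights); // re-sort and restart the window after a successful move
                low = 0;
                high = weights.length - 1;
            } else if (low < high - 1) {
                low++;                // shrink the window from MIN to MAX
            } else if (low > 0) {
                low = 0;              // shrink the window from MAX to MIN
                high--;
            } else {
                break;                // nothing movable: balanced, or stuck at shard granularity
            }
        }
    }

    public static void main(String[] args) {
        float[] weights = {0f, 1f, 5f};
        balance(weights, 1.0f);
        System.out.println(Arrays.toString(weights)); // [1.0, 2.0, 3.0]
    }
}

The two shrink directions correspond to the two comments in the patch: advance low when the light end cannot accept anything more, and drop high when the heavy end cannot give anything up.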
+ * a relocation must bring us closer to the balance if we only achieve the same delta the relocation is useless */ + if (tryRelocateShard(minNode, maxNode, index, delta)) { + /* + * TODO we could be a bit smarter here, we don't need to fully sort necessarily + * we could just find the place to insert linearly but the win might be minor + * compared to the added complexity + */ + weights[lowIdx] = sorter.weight(modelNodes[lowIdx]); + weights[highIdx] = sorter.weight(modelNodes[highIdx]); + sorter.sort(0, relevantNodes); + lowIdx = 0; + highIdx = relevantNodes - 1; + changed = true; continue; } - - sorter.reset(index, 0, relevantNodes); - int lowIdx = 0; - int highIdx = relevantNodes - 1; - while (true) { - final ModelNode minNode = modelNodes[lowIdx]; - final ModelNode maxNode = modelNodes[highIdx]; - advance_range: - if (maxNode.numShards(index) > 0) { - final float delta = absDelta(weights[lowIdx], weights[highIdx]); - if (lessThan(delta, threshold)) { - if (lowIdx > 0 && highIdx-1 > 0 // is there a chance for a higher delta? - && (absDelta(weights[0], weights[highIdx-1]) > threshold) // check if we need to break at all - ) { - /* This is a special case if allocations from the "heaviest" to the "lighter" nodes is not possible - * due to some allocation decider restrictions like zone awareness. if one zone has for instance - * less nodes than another zone. so one zone is horribly overloaded from a balanced perspective but we - * can't move to the "lighter" shards since otherwise the zone would go over capacity. - * - * This break jumps straight to the condition below were we start moving from the high index towards - * the low index to shrink the window we are considering for balance from the other direction. - * (check shrinking the window from MAX to MIN) - * See #3580 - */ - break advance_range; - } - if (logger.isTraceEnabled()) { - logger.trace("Stop balancing index [{}] min_node [{}] weight: [{}] max_node [{}] weight: [{}] delta: [{}]", - index, maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), weights[lowIdx], delta); - } - break; - } - if (logger.isTraceEnabled()) { - logger.trace("Balancing from node [{}] weight: [{}] to node [{}] weight: [{}] delta: [{}]", - maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), weights[lowIdx], delta); - } - /* pass the delta to the replication function to prevent relocations that only swap the weights of the two nodes. - * a relocation must bring us closer to the balance if we only achieve the same delta the relocation is useless */ - if (tryRelocateShard(minNode, maxNode, index, delta)) { - /* - * TODO we could be a bit smarter here, we don't need to fully sort necessarily - * we could just find the place to insert linearly but the win might be minor - * compared to the added complexity - */ - weights[lowIdx] = sorter.weight(modelNodes[lowIdx]); - weights[highIdx] = sorter.weight(modelNodes[highIdx]); - sorter.sort(0, relevantNodes); - lowIdx = 0; - highIdx = relevantNodes - 1; - changed = true; - continue; - } - } - if (lowIdx < highIdx - 1) { - /* Shrinking the window from MIN to MAX - * we can't move from any shard from the min node lets move on to the next node - * and see if the threshold still holds. 
We either don't have any shard of this - * index on this node of allocation deciders prevent any relocation.*/ - lowIdx++; - } else if (lowIdx > 0) { - /* Shrinking the window from MAX to MIN - * now we go max to min since obviously we can't move anything to the max node - * lets pick the next highest */ - lowIdx = 0; - highIdx--; - } else { - /* we are done here, we either can't relocate anymore or we are balanced */ - break; - } - } + } + if (lowIdx < highIdx - 1) { + /* Shrinking the window from MIN to MAX + * we can't move any shard from the min node, so let's move on to the next node + * and see if the threshold still holds. We either don't have any shard of this + * index on this node or allocation deciders prevent any relocation.*/ + lowIdx++; + } else if (lowIdx > 0) { + /* Shrinking the window from MAX to MIN + * now we go max to min since obviously we can't move anything to the max node + * let's pick the next highest */ + lowIdx = 0; + highIdx--; + } else { + /* we are done here, we either can't relocate anymore or we are balanced */ + break; } } } @@ -451,7 +426,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards * to the nodes we relocated them from. */ private String[] buildWeightOrderedIndices(NodeSorter sorter) { - final String[] indices = this.indices.toArray(new String[this.indices.size()]); + final String[] indices = allocation.routingTable().indicesRouting().keys().toArray(String.class); final float[] deltas = new float[indices.length]; for (int i = 0; i < deltas.length; i++) { sorter.reset(indices[i]); @@ -503,20 +478,16 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards * @return true if the allocation has changed, otherwise false */ public boolean moveShards() { - if (nodes.isEmpty()) { - /* with no nodes this is pointless */ - return false; - } - - // Create a copy of the started shards interleaving between nodes, and check if they can remain. In the presence of throttling + // Iterate over the started shards interleaving between nodes, and check if they can remain. In the presence of throttling // shard movements, the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are // offloading the shards. - List<ShardRouting> shards = new ArrayList<>(); + boolean changed = false; int index = 0; boolean found = true; + final NodeSorter sorter = newNodeSorter(); while (found) { found = false; - for (RoutingNode routingNode : routingNodes) { + for (RoutingNode routingNode : allocation.routingNodes()) { if (index >= routingNode.size()) { continue; } @@ -524,64 +495,52 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards ShardRouting shardRouting = routingNode.get(index); // we can only move started shards...
if (shardRouting.started()) { - shards.add(shardRouting); + final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId()); + assert sourceNode != null && sourceNode.containsShard(shardRouting); + Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation); + if (decision.type() == Decision.Type.NO) { + changed |= moveShard(sorter, shardRouting, sourceNode, routingNode); + } } } index++; } - if (shards.isEmpty()) { - return false; - } - final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned(); - boolean changed = initialize(routingNodes, unassigned); - if (changed == false) { - final NodeSorter sorter = newNodeSorter(); - final ModelNode[] modelNodes = sorter.modelNodes; - for (ShardRouting shardRouting : shards) { - final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId()); - assert sourceNode != null && sourceNode.containsShard(shardRouting); - final RoutingNode routingNode = sourceNode.getRoutingNode(routingNodes); - Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation); - if (decision.type() == Decision.Type.NO) { - logger.debug("[{}][{}] allocated on [{}], but can no longer be allocated on it, moving...", shardRouting.index(), shardRouting.id(), routingNode.node()); - sorter.reset(shardRouting.getIndexName()); - /* - * the sorter holds the minimum weight node first for the shards index. - * We now walk through the nodes until we find a node to allocate the shard. - * This is not guaranteed to be balanced after this operation we still try best effort to - * allocate on the minimal eligible node. - */ - boolean moved = false; - for (ModelNode currentNode : modelNodes) { - if (currentNode == sourceNode) { - continue; - } - RoutingNode target = currentNode.getRoutingNode(routingNodes); - Decision allocationDecision = allocation.deciders().canAllocate(shardRouting, target, allocation); - Decision rebalanceDecision = allocation.deciders().canRebalance(shardRouting, allocation); - if (allocationDecision.type() == Type.YES && rebalanceDecision.type() == Type.YES) { // TODO maybe we can respect throttling here too? - Decision sourceDecision = sourceNode.removeShard(shardRouting); - ShardRouting targetRelocatingShard = routingNodes.relocate(shardRouting, target.nodeId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)); - // re-add (now relocating shard) to source node - sourceNode.addShard(shardRouting, sourceDecision); - Decision targetDecision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision); - currentNode.addShard(targetRelocatingShard, targetDecision); - if (logger.isTraceEnabled()) { - logger.trace("Moved shard [{}] to node [{}]", shardRouting, routingNode.node()); - } - moved = true; - changed = true; - break; - } - } - if (moved == false) { - logger.debug("[{}][{}] can't move", shardRouting.index(), shardRouting.id()); + return changed; + } + + /** + * Move started shard to the minimal eligible node with respect to the weight function + * + * @return true if the shard was moved successfully, otherwise false + */ + private boolean moveShard(NodeSorter sorter, ShardRouting shardRouting, ModelNode sourceNode, RoutingNode routingNode) { + logger.debug("[{}][{}] allocated on [{}], but can no longer be allocated on it, moving...", shardRouting.index(), shardRouting.id(), routingNode.node()); + sorter.reset(shardRouting.getIndexName()); + /* + * the sorter holds the minimum weight node first for the shards index. 
+ * We now walk through the nodes until we find a node to allocate the shard. + * This is not guaranteed to be balanced after this operation we still try best effort to + * allocate on the minimal eligible node. + */ + for (ModelNode currentNode : sorter.modelNodes) { + if (currentNode != sourceNode) { + RoutingNode target = currentNode.getRoutingNode(); + Decision allocationDecision = allocation.deciders().canAllocate(shardRouting, target, allocation); + Decision rebalanceDecision = allocation.deciders().canRebalance(shardRouting, allocation); + if (allocationDecision.type() == Type.YES && rebalanceDecision.type() == Type.YES) { // TODO maybe we can respect throttling here too? + sourceNode.removeShard(shardRouting); + ShardRouting targetRelocatingShard = routingNodes.relocate(shardRouting, target.nodeId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE)); + currentNode.addShard(targetRelocatingShard); + if (logger.isTraceEnabled()) { + logger.trace("Moved shard [{}] to node [{}]", shardRouting, routingNode.node()); } + return true; } } } - return changed; + logger.debug("[{}][{}] can't move", shardRouting.index(), shardRouting.id()); + return false; } /** @@ -593,18 +552,19 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards * on the target node which we respect during the allocation / balancing * process. In short, this method recreates the status-quo in the cluster. */ - private void buildModelFromAssigned(Iterable shards) { - for (ShardRouting shard : shards) { - assert shard.assignedToNode(); - /* we skip relocating shards here since we expect an initializing shard with the same id coming in */ - if (shard.state() == RELOCATING) { - continue; - } - ModelNode node = nodes.get(shard.currentNodeId()); - assert node != null; - node.addShard(shard, Decision.single(Type.YES, "Already allocated on node", node.getNodeId())); - if (logger.isTraceEnabled()) { - logger.trace("Assigned shard [{}] to node [{}]", shard, node.getNodeId()); + private void buildModelFromAssigned() { + for (RoutingNode rn : routingNodes) { + ModelNode node = new ModelNode(rn); + nodes.put(rn.nodeId(), node); + for (ShardRouting shard : rn) { + assert rn.nodeId().equals(shard.currentNodeId()); + /* we skip relocating shards here since we expect an initializing shard with the same id coming in */ + if (shard.state() != RELOCATING) { + node.addShard(shard); + if (logger.isTraceEnabled()) { + logger.trace("Assigned shard [{}] to node [{}]", shard, node.getNodeId()); + } + } } } } @@ -612,8 +572,11 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards /** * Allocates all given shards on the minimal eligible node for the shards index * with respect to the weight function. All given shards must be unassigned. 
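The contract just quoted (allocate each unassigned shard on the minimal eligible node with respect to the weight function) reduces to a one-liner over precomputed weights. In the sketch below, Node and the canAllocate predicate are hypothetical stand-ins for ModelNode and the allocation deciders; the real method also handles priority ordering, throttling, and the ignore-list:

import java.util.Comparator;
import java.util.List;
import java.util.Optional;
import java.util.function.BiPredicate;

public class MinWeightAllocationSketch {
    // Hypothetical stand-in for ModelNode with a precomputed weight.
    record Node(String id, float weight) {}

    /** Pick the lightest node the deciders would accept for this shard, if any. */
    static Optional<Node> allocateOn(String shard, List<Node> nodes, BiPredicate<String, Node> canAllocate) {
        return nodes.stream()
                .filter(n -> canAllocate.test(shard, n))
                .min(Comparator.comparingDouble(Node::weight));
    }

    public static void main(String[] args) {
        List<Node> nodes = List.of(new Node("n1", 3f), new Node("n2", 1f), new Node("n3", 2f));
        // stand-in decider: forbid n2, as e.g. same-shard awareness would
        BiPredicate<String, Node> deciders = (shard, node) -> !node.id().equals("n2");
        System.out.println(allocateOn("shard-0", nodes, deciders)); // Optional[Node[id=n3, weight=2.0]]
    }
}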
+ * @return true if the current configuration has been + * changed, otherwise false */ - private boolean allocateUnassigned(RoutingNodes.UnassignedShards unassigned) { + private boolean allocateUnassigned() { + RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned(); assert !nodes.isEmpty(); if (logger.isTraceEnabled()) { logger.trace("Start allocating unassigned shards"); @@ -657,7 +620,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards int secondaryLength = 0; int primaryLength = primary.length; ArrayUtil.timSort(primary, comparator); - final Set throttledNodes = Collections.newSetFromMap(new IdentityHashMap()); + final Set throttledNodes = Collections.newSetFromMap(new IdentityHashMap<>()); do { for (int i = 0; i < primaryLength; i++) { ShardRouting shard = primary[i]; @@ -695,7 +658,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards * don't check deciders */ if (currentWeight <= minWeight) { - Decision currentDecision = deciders.canAllocate(shard, node.getRoutingNode(routingNodes), allocation); + Decision currentDecision = deciders.canAllocate(shard, node.getRoutingNode(), allocation); NOUPDATE: if (currentDecision.type() == Type.YES || currentDecision.type() == Type.THROTTLE) { if (currentWeight == minWeight) { @@ -736,7 +699,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards } assert decision != null && minNode != null || decision == null && minNode == null; if (minNode != null) { - minNode.addShard(shard, decision); + minNode.addShard(shard); if (decision.type() == Type.YES) { if (logger.isTraceEnabled()) { logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId()); @@ -745,7 +708,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards changed = true; continue; // don't add to ignoreUnassigned } else { - final RoutingNode node = minNode.getRoutingNode(routingNodes); + final RoutingNode node = minNode.getRoutingNode(); if (deciders.canAllocate(node, allocation).type() != Type.YES) { if (logger.isTraceEnabled()) { logger.trace("Can not allocate on node [{}] remove from round decision [{}]", node, decision.type()); @@ -791,10 +754,10 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards } ShardRouting candidate = null; final AllocationDeciders deciders = allocation.deciders(); - for (ShardRouting shard : index.getAllShards()) { + for (ShardRouting shard : index) { if (shard.started()) { // skip initializing, unassigned and relocating shards we can't relocate them anyway - Decision allocationDecision = deciders.canAllocate(shard, minNode.getRoutingNode(routingNodes), allocation); + Decision allocationDecision = deciders.canAllocate(shard, minNode.getRoutingNode(), allocation); Decision rebalanceDecision = deciders.canRebalance(shard, allocation); if (((allocationDecision.type() == Type.YES) || (allocationDecision.type() == Type.THROTTLE)) && ((rebalanceDecision.type() == Type.YES) || (rebalanceDecision.type() == Type.THROTTLE))) { @@ -815,24 +778,17 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards } if (candidate != null) { - /* allocate on the model even if not throttled */ maxNode.removeShard(candidate); - minNode.addShard(candidate, decision); + minNode.addShard(candidate); if (decision.type() == Type.YES) { /* only allocate on the cluster if we are not throttled */ if (logger.isTraceEnabled()) { logger.trace("Relocate shard [{}] from node [{}] to node [{}]", 
                            candidate, maxNode.getNodeId(), minNode.getNodeId());
                 }
-                /* now allocate on the cluster - if we are started we need to relocate the shard */
-                if (candidate.started()) {
-                    routingNodes.relocate(candidate, minNode.getNodeId(), allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
-
-                } else {
-                    routingNodes.initialize(candidate, minNode.getNodeId(), null, allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
-                }
+                /* now allocate on the cluster */
+                routingNodes.relocate(candidate, minNode.getNodeId(), allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
                 return true;
-            }
         }
     }
@@ -846,14 +802,12 @@
     }

     static class ModelNode implements Iterable<ModelIndex> {
-        private final String id;
         private final Map<String, ModelIndex> indices = new HashMap<>();
         private int numShards = 0;
-        // lazily calculated
-        private RoutingNode routingNode;
+        private final RoutingNode routingNode;

-        public ModelNode(String id) {
-            this.id = id;
+        public ModelNode(RoutingNode routingNode) {
+            this.routingNode = routingNode;
         }

         public ModelIndex getIndex(String indexId) {
@@ -861,13 +815,10 @@
         }

         public String getNodeId() {
-            return id;
+            return routingNode.nodeId();
         }

-        public RoutingNode getRoutingNode(RoutingNodes routingNodes) {
-            if (routingNode == null) {
-                routingNode = routingNodes.node(id);
-            }
+        public RoutingNode getRoutingNode() {
             return routingNode;
         }

@@ -888,33 +839,31 @@
             return -1;
         }

-        public void addShard(ShardRouting shard, Decision decision) {
+        public void addShard(ShardRouting shard) {
             ModelIndex index = indices.get(shard.getIndexName());
             if (index == null) {
                 index = new ModelIndex(shard.getIndexName());
                 indices.put(index.getIndexId(), index);
             }
-            index.addShard(shard, decision);
+            index.addShard(shard);
             numShards++;
         }

-        public Decision removeShard(ShardRouting shard) {
+        public void removeShard(ShardRouting shard) {
             ModelIndex index = indices.get(shard.getIndexName());
-            Decision removed = null;
             if (index != null) {
-                removed = index.removeShard(shard);
-                if (removed != null && index.numShards() == 0) {
+                index.removeShard(shard);
+                if (index.numShards() == 0) {
                     indices.remove(shard.getIndexName());
                 }
             }
             numShards--;
-            return removed;
         }

         @Override
         public String toString() {
             StringBuilder sb = new StringBuilder();
-            sb.append("Node(").append(id).append(")");
+            sb.append("Node(").append(routingNode.nodeId()).append(")");
             return sb.toString();
         }

@@ -930,9 +879,9 @@
         }

-    static final class ModelIndex {
+    static final class ModelIndex implements Iterable<ShardRouting> {
         private final String id;
-        private final Map<ShardRouting, Decision> shards = new HashMap<>();
+        private final Set<ShardRouting> shards = new HashSet<>(4); // expect few shards of same index to be allocated on same node
         private int highestPrimary = -1;

         public ModelIndex(String id) {
@@ -942,7 +891,7 @@
         public int highestPrimary() {
             if (highestPrimary == -1) {
                 int maxId = -1;
-                for (ShardRouting shard : shards.keySet()) {
+                for (ShardRouting shard : shards) {
                     if (shard.primary()) {
                         maxId = Math.max(maxId, shard.id());
                     }
@@ -960,24 +909,25 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
             return shards.size();
         }

-        public Collection<ShardRouting> getAllShards() {
-            return shards.keySet();
+        @Override
+        public Iterator<ShardRouting> iterator() {
+            return shards.iterator();
         }

-        public Decision removeShard(ShardRouting shard) {
+        public void removeShard(ShardRouting shard) {
             highestPrimary = -1;
-            return shards.remove(shard);
+            assert shards.contains(shard) : "Shard not allocated on current node: " + shard;
+            shards.remove(shard);
         }

-        public void addShard(ShardRouting shard, Decision decision) {
+        public void addShard(ShardRouting shard) {
             highestPrimary = -1;
-            assert decision != null;
-            assert !shards.containsKey(shard) : "Shard already allocated on current node: " + shards.get(shard) + " " + shard;
-            shards.put(shard, decision);
+            assert !shards.contains(shard) : "Shard already allocated on current node: " + shard;
+            shards.add(shard);
         }

         public boolean containsShard(ShardRouting shard) {
-            return shards.containsKey(shard);
+            return shards.contains(shard);
         }
     }
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java
index 4d9c05527d3..0bf07e8cba9 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java
@@ -19,56 +19,25 @@
 package org.elasticsearch.cluster.routing.allocation.allocator;

-import org.elasticsearch.cluster.routing.ShardRouting;
-import org.elasticsearch.cluster.routing.ShardRoutingState;
-import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;

 /**
  *
  * A {@link ShardsAllocator} is the main entry point for shard allocation on nodes in the cluster.
  * The allocator makes the basic decision of where a shard instance will be allocated, and whether already allocated instances
- * need relocate to other nodes due to node failures or due to rebalancing decisions.
+ * need to relocate to other nodes due to node failures or due to rebalancing decisions.
  *
  */
 public interface ShardsAllocator {

     /**
-     * Applies changes on started nodes based on the implemented algorithm. For example if a
-     * shard has changed to {@link ShardRoutingState#STARTED} from {@link ShardRoutingState#RELOCATING}
-     * this allocator might apply some cleanups on the node that used to hold the shard.
-     * @param allocation all started {@link ShardRouting shards}
-     */
-    void applyStartedShards(StartedRerouteAllocation allocation);
-
-    /**
-     * Applies changes on failed nodes based on the implemented algorithm.
-     * @param allocation all failed {@link ShardRouting shards}
-     */
-    void applyFailedShards(FailedRerouteAllocation allocation);
-
-    /**
-     * Assign all unassigned shards to nodes
+     * Allocates shards to nodes in the cluster. An implementation of this method should:
+     * - assign unassigned shards
+     * - relocate shards that cannot stay on a node anymore
+     * - relocate shards to find a good shard balance in the cluster
      *
      * @param allocation current node allocation
     * @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
      */
-    boolean allocateUnassigned(RoutingAllocation allocation);
-
-    /**
-     * Rebalancing number of shards on all nodes
-     *
-     * @param allocation current node allocation
-     * @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
-     */
-    boolean rebalance(RoutingAllocation allocation);
-
-    /**
-     * Move started shards that can not be allocated to a node anymore
-     *
-     * @param allocation current node allocation
-     * @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
-     */
-    boolean moveShards(RoutingAllocation allocation);
+    boolean allocate(RoutingAllocation allocation);
 }
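With this change, the three per-phase callbacks collapse into the single allocate method, and the delegating ShardsAllocators wrapper (deleted below) is no longer needed. As a rough sketch of the smallest legal implementor of the consolidated contract (this NoopShardsAllocator class is hypothetical, shown only to illustrate the interface):

    import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;

    // Hypothetical example, assumed to live in the same package as ShardsAllocator.
    public class NoopShardsAllocator implements ShardsAllocator {
        @Override
        public boolean allocate(RoutingAllocation allocation) {
            // A real implementation would assign unassigned shards, move shards that
            // can no longer stay where they are, and then rebalance. This one does
            // nothing and reports that the routing table is unchanged.
            return false;
        }
    }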
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocators.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocators.java
deleted file mode 100644
index f3eb1ebbf14..00000000000
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocators.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.cluster.routing.allocation.allocator;
-
-import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
-import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.gateway.GatewayAllocator;
-
-/**
- * The {@link ShardsAllocator} class offers methods for allocating shard within a cluster.
- * These methods include moving shards and re-balancing the cluster.
- * It also allows management
- * of shards by their state.
- */
-public class ShardsAllocators extends AbstractComponent implements ShardsAllocator {
-
-    private final GatewayAllocator gatewayAllocator;
-    private final ShardsAllocator allocator;
-
-    public ShardsAllocators(GatewayAllocator allocator) {
-        this(Settings.Builder.EMPTY_SETTINGS, allocator);
-    }
-
-    public ShardsAllocators(Settings settings, GatewayAllocator allocator) {
-        this(settings, allocator, new BalancedShardsAllocator(settings));
-    }
-
-    @Inject
-    public ShardsAllocators(Settings settings, GatewayAllocator gatewayAllocator, ShardsAllocator allocator) {
-        super(settings);
-        this.gatewayAllocator = gatewayAllocator;
-        this.allocator = allocator;
-    }
-
-    @Override
-    public void applyStartedShards(StartedRerouteAllocation allocation) {
-        gatewayAllocator.applyStartedShards(allocation);
-        allocator.applyStartedShards(allocation);
-    }
-
-    @Override
-    public void applyFailedShards(FailedRerouteAllocation allocation) {
-        gatewayAllocator.applyFailedShards(allocation);
-        allocator.applyFailedShards(allocation);
-    }
-
-    @Override
-    public boolean allocateUnassigned(RoutingAllocation allocation) {
-        boolean changed = false;
-        changed |= gatewayAllocator.allocateUnassigned(allocation);
-        changed |= allocator.allocateUnassigned(allocation);
-        return changed;
-    }
-
-    protected long nanoTime() {
-        return System.nanoTime();
-    }
-
-    @Override
-    public boolean rebalance(RoutingAllocation allocation) {
-        if (allocation.hasPendingAsyncFetch() == false) {
-            /*
-             * see https://github.com/elastic/elasticsearch/issues/14387
-             * if we allow rebalance operations while we are still fetching shard store data
-             * we might end up with unnecessary rebalance operations which can be super confusion/frustrating
-             * since once the fetches come back we might just move all the shards back again.
-             * Therefore we only do a rebalance if we have fetched all information.
-             */
-            return allocator.rebalance(allocation);
-        } else {
-            logger.debug("skipping rebalance due to in-flight shard/store fetches");
-            return false;
-        }
-    }
-
-    @Override
-    public boolean moveShards(RoutingAllocation allocation) {
-        return allocator.moveShards(allocation);
-    }
-}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java
index 5ccd9e9bb63..f4b1be19af4 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java
@@ -112,7 +112,7 @@ public class AllocateStalePrimaryAllocationCommand extends BasePrimaryAllocation
                 "allocating an empty primary for [" + index + "][" + shardId + "] can result in data loss.
Please confirm by setting the accept_data_loss parameter to true"); } - final IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndexName()); + final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index()); if (shardRouting.allocatedPostIndexCreate(indexMetaData) == false) { return explainOrThrowRejectedCommand(explain, allocation, "trying to allocate an existing primary shard [" + index + "][" + shardId + "], while no such shard has ever been active"); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java index 9859a9b6584..227ec277469 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.util.HashMap; @@ -77,8 +78,11 @@ public class AwarenessAllocationDecider extends AllocationDecider { public static final String NAME = "awareness"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING = new Setting<>("cluster.routing.allocation.awareness.attributes", "", Strings::splitStringByCommaToArray , true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.awareness.force.", true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING = + new Setting<>("cluster.routing.allocation.awareness.attributes", "", Strings::splitStringByCommaToArray , Property.Dynamic, + Property.NodeScope); + public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING = + Setting.groupSetting("cluster.routing.allocation.awareness.force.", Property.Dynamic, Property.NodeScope); private String[] awarenessAttributes; @@ -149,7 +153,7 @@ public class AwarenessAllocationDecider extends AllocationDecider { return allocation.decision(Decision.YES, NAME, "no allocation awareness enabled"); } - IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.index()); + IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index()); int shardCount = indexMetaData.getNumberOfReplicas() + 1; // 1 for primary for (String awarenessAttribute : awarenessAttributes) { // the node the shard exists on must be associated with an awareness attribute diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java index 11fce397b26..84e974aceb0 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import 
org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;

 import java.util.Locale;

@@ -48,7 +49,9 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {

     public static final String NAME = "cluster_rebalance";
-    public static final Setting<ClusterRebalanceType> CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING = new Setting<>("cluster.routing.allocation.allow_rebalance", ClusterRebalanceType.INDICES_ALL_ACTIVE.name().toLowerCase(Locale.ROOT), ClusterRebalanceType::parseString, true, Setting.Scope.CLUSTER);
+    public static final Setting<ClusterRebalanceType> CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING =
+        new Setting<>("cluster.routing.allocation.allow_rebalance", ClusterRebalanceType.INDICES_ALL_ACTIVE.name().toLowerCase(Locale.ROOT),
+            ClusterRebalanceType::parseString, Property.Dynamic, Property.NodeScope);

     /**
      * An enum representation for the configured re-balance type.
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java
index d39b9604066..fe6bf918dc2 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java
@@ -24,6 +24,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;

 /**
@@ -42,7 +43,9 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider {

     public static final String NAME = "concurrent_rebalance";

-    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING = Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, -1, true, Setting.Scope.CLUSTER);
+    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING =
+        Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, -1,
+            Property.Dynamic, Property.NodeScope);
     private volatile int clusterConcurrentRebalance;

     @Inject
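Across all of these decider changes the migration is mechanical: the trailing boolean (dynamically updatable?) and the Setting.Scope argument are replaced by Setting.Property flags. A small sketch against the new API, with the old form noted in a comment (the wrapper class name is made up for illustration):

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Setting.Property;

    class SettingMigrationExample {
        // Old API (pre-change): Setting.intSetting(key, 2, -1, true, Setting.Scope.CLUSTER),
        // where "true" meant dynamically updatable and Scope.CLUSTER chose the registry.
        // New API: the same intent is spelled out as Property flags.
        static final Setting<Integer> CONCURRENT_REBALANCE =
            Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, -1,
                Property.Dynamic, Property.NodeScope);
    }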
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
index 821fa55d704..e2124558f2d 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
@@ -38,6 +38,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.RatioValue;
@@ -81,11 +82,22 @@ public class DiskThresholdDecider extends AllocationDecider {

     private volatile boolean enabled;
     private volatile TimeValue rerouteInterval;

-    public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.threshold_enabled", true, true, Setting.Scope.CLUSTER);
-    public static final Setting<String> CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.low", "85%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.low"), true, Setting.Scope.CLUSTER);
-    public static final Setting<String> CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.high", "90%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.high"), true, Setting.Scope.CLUSTER);
-    public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true, true, Setting.Scope.CLUSTER);;
-    public static final Setting<TimeValue> CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.CLUSTER);
+    public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING =
+        Setting.boolSetting("cluster.routing.allocation.disk.threshold_enabled", true, Property.Dynamic, Property.NodeScope);
+    public static final Setting<String> CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING =
+        new Setting<>("cluster.routing.allocation.disk.watermark.low", "85%",
+            (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.low"),
+            Property.Dynamic, Property.NodeScope);
+    public static final Setting<String> CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING =
+        new Setting<>("cluster.routing.allocation.disk.watermark.high", "90%",
+            (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.high"),
+            Property.Dynamic, Property.NodeScope);
+    public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING =
+        Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true,
+            Property.Dynamic, Property.NodeScope);
+    public static final Setting<TimeValue> CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING =
+        Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60),
+            Property.Dynamic, Property.NodeScope);

     /**
      * Listens for a node to go over the high watermark and kicks off an empty
@@ -330,7 +342,7 @@ public class DiskThresholdDecider extends AllocationDecider {
         }

         // a flag for whether the primary shard has been previously allocated
-        IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndexName());
+        IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
         boolean primaryHasBeenAllocated = shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData);

         // checks for exact byte comparisons
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
index 9131355876b..0b69ba2a19e 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
+++
b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.util.Locale; @@ -60,11 +61,19 @@ public class EnableAllocationDecider extends AllocationDecider { public static final String NAME = "enable"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("cluster.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.CLUSTER); - public static final Setting INDEX_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("index.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.INDEX); + public static final Setting CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING = + new Setting<>("cluster.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, + Property.Dynamic, Property.NodeScope); + public static final Setting INDEX_ROUTING_ALLOCATION_ENABLE_SETTING = + new Setting<>("index.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, + Property.Dynamic, Property.IndexScope); - public static final Setting CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("cluster.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.CLUSTER); - public static final Setting INDEX_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("index.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.INDEX); + public static final Setting CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING = + new Setting<>("cluster.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, + Property.Dynamic, Property.NodeScope); + public static final Setting INDEX_ROUTING_REBALANCE_ENABLE_SETTING = + new Setting<>("index.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, + Property.Dynamic, Property.IndexScope); private volatile Rebalance enableRebalance; private volatile Allocation enableAllocation; @@ -92,7 +101,7 @@ public class EnableAllocationDecider extends AllocationDecider { return allocation.decision(Decision.YES, NAME, "allocation disabling is ignored"); } - final IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndexName()); + final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index()); final Allocation enable; if (INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.exists(indexMetaData.getSettings())) { enable = INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.get(indexMetaData.getSettings()); @@ -127,7 +136,7 @@ public class EnableAllocationDecider extends AllocationDecider { return allocation.decision(Decision.YES, NAME, "rebalance disabling is ignored"); } - Settings indexSettings = allocation.routingNodes().metaData().index(shardRouting.index()).getSettings(); + Settings indexSettings = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index()).getSettings(); final Rebalance enable; if (INDEX_ROUTING_REBALANCE_ENABLE_SETTING.exists(indexSettings)) { enable = INDEX_ROUTING_REBALANCE_ENABLE_SETTING.get(indexSettings); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java 
b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java index f8ff5f37aed..d1aa0d8b583 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND; @@ -60,9 +61,12 @@ public class FilterAllocationDecider extends AllocationDecider { public static final String NAME = "filter"; - public static final Setting CLUSTER_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.require.", true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.include.", true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.exclude.", true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_REQUIRE_GROUP_SETTING = + Setting.groupSetting("cluster.routing.allocation.require.", Property.Dynamic, Property.NodeScope); + public static final Setting CLUSTER_ROUTING_INCLUDE_GROUP_SETTING = + Setting.groupSetting("cluster.routing.allocation.include.", Property.Dynamic, Property.NodeScope); + public static final Setting CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING = + Setting.groupSetting("cluster.routing.allocation.exclude.", Property.Dynamic, Property.NodeScope); private volatile DiscoveryNodeFilters clusterRequireFilters; private volatile DiscoveryNodeFilters clusterIncludeFilters; @@ -98,7 +102,7 @@ public class FilterAllocationDecider extends AllocationDecider { Decision decision = shouldClusterFilter(node, allocation); if (decision != null) return decision; - decision = shouldIndexFilter(allocation.routingNodes().metaData().index(shardRouting.index()), node, allocation); + decision = shouldIndexFilter(allocation.routingNodes().metaData().getIndexSafe(shardRouting.index()), node, allocation); if (decision != null) return decision; return allocation.decision(Decision.YES, NAME, "node passes include/exclude/require filters"); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java index e766b4c49aa..04247525f1d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; /** @@ -59,13 +60,17 @@ public class ShardsLimitAllocationDecider extends AllocationDecider { * Controls the maximum number 
of shards per index on a single Elasticsearch
      * node. Negative values are interpreted as unlimited.
      */
-    public static final Setting<Integer> INDEX_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("index.routing.allocation.total_shards_per_node", -1, -1, true, Setting.Scope.INDEX);
+    public static final Setting<Integer> INDEX_TOTAL_SHARDS_PER_NODE_SETTING =
+        Setting.intSetting("index.routing.allocation.total_shards_per_node", -1, -1,
+            Property.Dynamic, Property.IndexScope);

     /**
      * Controls the maximum number of shards per node on a global level.
      * Negative values are interpreted as unlimited.
      */
-    public static final Setting<Integer> CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, -1, true, Setting.Scope.CLUSTER);
+    public static final Setting<Integer> CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING =
+        Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, -1,
+            Property.Dynamic, Property.NodeScope);

     @Inject
@@ -81,7 +86,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {

     @Override
     public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
-        IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index());
+        IndexMetaData indexMd = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index());
         final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings);
         // Capture the limit here in case it changes during this method's
         // execution
@@ -118,7 +123,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {

     @Override
     public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
-        IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index());
+        IndexMetaData indexMd = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index());
         final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings);
         // Capture the limit here in case it changes during this method's
         // execution
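As the two hunks above show, both limits are read through Setting.get(indexSettings, nodeSettings): the index-scoped value wins when present, and the node-level value is the fallback. A small usage sketch under that assumption (the index value is made up, and the decider class is assumed to be imported):

    import org.elasticsearch.common.settings.Settings;

    class ShardsPerNodeExample {
        public static void main(String[] args) {
            // Hypothetical index settings: cap this index at 2 shards per node
            // (the default of -1 means unlimited).
            Settings indexSettings = Settings.builder()
                .put("index.routing.allocation.total_shards_per_node", 2)
                .build();
            // The index-scoped value is used when present; the node settings
            // (empty here) would otherwise supply the fallback.
            int limit = ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING
                .get(indexSettings, Settings.EMPTY);
            System.out.println("effective per-node shard limit: " + limit);
        }
    }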
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java
index cf889cde6ad..d656afc8036 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java
@@ -26,6 +26,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;

 /**
@@ -39,7 +40,9 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider {

     /**
      * Disables relocation of shards that are currently being snapshotted.
      */
-    public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.snapshot.relocation_enabled", false, true, Setting.Scope.CLUSTER);
+    public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING =
+        Setting.boolSetting("cluster.routing.allocation.snapshot.relocation_enabled", false,
+            Property.Dynamic, Property.NodeScope);

     private volatile boolean enableRelocation = false;
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
index 25f43f57610..ca6b312da4c 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
@@ -25,6 +25,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;

 /**
@@ -50,10 +51,25 @@ public class ThrottlingAllocationDecider extends AllocationDecider {

     public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = 2;
     public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = 4;
     public static final String NAME = "throttling";
-    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_recoveries", Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_recoveries"), true, Setting.Scope.CLUSTER);
-    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING = Setting.intSetting("cluster.routing.allocation.node_initial_primaries_recoveries", DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, 0, true, Setting.Scope.CLUSTER);
-    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_incoming_recoveries", (s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_incoming_recoveries"), true, Setting.Scope.CLUSTER);
-    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_outgoing_recoveries", (s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_outgoing_recoveries"), true, Setting.Scope.CLUSTER);
+    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING =
+        new Setting<>("cluster.routing.allocation.node_concurrent_recoveries",
+            Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES),
+            (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_recoveries"),
+            Property.Dynamic, Property.NodeScope);
+    public static final Setting<Integer>
CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING = + Setting.intSetting("cluster.routing.allocation.node_initial_primaries_recoveries", + DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, 0, + Property.Dynamic, Property.NodeScope); + public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING = + new Setting<>("cluster.routing.allocation.node_concurrent_incoming_recoveries", + (s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s), + (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_incoming_recoveries"), + Property.Dynamic, Property.NodeScope); + public static final Setting CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING = + new Setting<>("cluster.routing.allocation.node_concurrent_outgoing_recoveries", + (s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s), + (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_outgoing_recoveries"), + Property.Dynamic, Property.NodeScope); private volatile int primariesInitialRecoveries; diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java similarity index 75% rename from core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java rename to core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java index df22bdb93e5..d3fca22c165 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java @@ -19,11 +19,9 @@ package org.elasticsearch.cluster.service; -import org.elasticsearch.Version; import org.elasticsearch.cluster.AckedClusterStateTaskListener; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterState.Builder; import org.elasticsearch.cluster.ClusterStateListener; @@ -32,19 +30,18 @@ import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.LocalNodeMasterListener; +import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.TimeoutClusterStateListener; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; @@ -52,9 +49,9 @@ import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import 
org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; -import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.CountDown; @@ -65,9 +62,7 @@ import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.elasticsearch.common.util.concurrent.PrioritizedRunnable; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; import java.util.ArrayList; import java.util.Collection; @@ -78,8 +73,6 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Queue; -import java.util.Random; -import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.Executor; import java.util.concurrent.Future; @@ -94,28 +87,20 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF /** * */ -public class InternalClusterService extends AbstractLifecycleComponent implements ClusterService { +public class ClusterService extends AbstractLifecycleComponent { - public static final Setting CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER); - public static final Setting CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.service.reconnect_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = + Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30), + Property.Dynamic, Property.NodeScope); public static final String UPDATE_THREAD_NAME = "clusterService#updateTask"; - public static final Setting NODE_ID_SEED_SETTING = - // don't use node.id.seed so it won't be seen as an attribute - Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, false, Setting.Scope.CLUSTER); private final ThreadPool threadPool; private BiConsumer clusterStatePublisher; private final OperationRouting operationRouting; - private final TransportService transportService; - private final ClusterSettings clusterSettings; - private final DiscoveryNodeService discoveryNodeService; - private final Version version; - - private final TimeValue reconnectInterval; private TimeValue slowTaskLoggingThreshold; @@ -130,7 +115,8 @@ public class InternalClusterService extends AbstractLifecycleComponent> updateTasksPerExecutor = new HashMap<>(); // TODO this is rather frequently changing I guess a Synced Set would be better here and a dedicated remove API private final Collection postAppliedListeners = new CopyOnWriteArrayList<>(); - private final Iterable preAppliedListeners = Iterables.concat(priorityClusterStateListeners, clusterStateListeners, lastClusterStateListeners); + private final Iterable preAppliedListeners = Iterables.concat(priorityClusterStateListeners, + clusterStateListeners, lastClusterStateListeners); private final LocalNodeMasterListeners localNodeMasterListeners; @@ -140,60 +126,69 @@ 
public class InternalClusterService extends AbstractLifecycleComponent publisher) { + synchronized public void setClusterStatePublisher(BiConsumer publisher) { clusterStatePublisher = publisher; } - @Override - public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException { + synchronized public void setLocalNode(DiscoveryNode localNode) { + assert clusterState.nodes().localNodeId() == null : "local node is already set"; + DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()).put(localNode).localNodeId(localNode.id()); + this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); + } + + synchronized public void setNodeConnectionsService(NodeConnectionsService nodeConnectionsService) { + assert this.nodeConnectionsService == null : "nodeConnectionsService is already set"; + this.nodeConnectionsService = nodeConnectionsService; + } + + /** + * Adds an initial block to be set on the first cluster state created. + */ + synchronized public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException { if (lifecycle.started()) { throw new IllegalStateException("can't set initial block when started"); } initialBlocks.addGlobalBlock(block); } - @Override - public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException { + /** + * Remove an initial block to be set on the first cluster state created. + */ + synchronized public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException { removeInitialStateBlock(block.id()); } - @Override - public void removeInitialStateBlock(int blockId) throws IllegalStateException { + /** + * Remove an initial block to be set on the first cluster state created. + */ + synchronized public void removeInitialStateBlock(int blockId) throws IllegalStateException { if (lifecycle.started()) { throw new IllegalStateException("can't set initial block when started"); } @@ -201,28 +196,27 @@ public class InternalClusterService extends AbstractLifecycleComponent nodeAttributes = discoveryNodeService.buildAttributes(); - // note, we rely on the fact that its a new id each time we start, see FD and "kill -9" handling - final String nodeId = generateNodeId(settings); - final TransportAddress publishAddress = transportService.boundAddress().publishAddress(); - DiscoveryNode localNode = new DiscoveryNode(settings.get("node.name"), nodeId, publishAddress, nodeAttributes, version); - DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id()); - this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).blocks(initialBlocks).build(); - this.transportService.setLocalNode(localNode); + this.updateTasksExecutor = EsExecutors.newSinglePrioritizing(UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME), + threadPool.getThreadContext()); + this.clusterState = ClusterState.builder(clusterState).blocks(initialBlocks).build(); } @Override - protected void doStop() { - FutureUtils.cancel(this.reconnectToNodes); + synchronized protected void doStop() { for (NotifyTimeout onGoingTimeout : onGoingTimeouts) { onGoingTimeout.cancel(); + try { + onGoingTimeout.cancel(); + onGoingTimeout.listener.onClose(); + } catch (Exception ex) { + logger.debug("failed to notify listeners on shutdown", ex); + } } ThreadPool.terminate(updateTasksExecutor, 10, TimeUnit.SECONDS); postAppliedListeners.stream().filter(listener -> listener instanceof TimeoutClusterStateListener) @@ -231,40 +225,51 @@ public class 
InternalClusterService extends AbstractLifecycleComponent the type of the cluster state update task state + */ public void submitStateUpdateTask(final String source, final T task, final ClusterStateTaskConfig config, final ClusterStateTaskExecutor executor, @@ -334,9 +373,9 @@ public class InternalClusterService extends AbstractLifecycleComponent void innerSubmitStateUpdateTask(final String source, final T task, - final ClusterStateTaskConfig config, - final ClusterStateTaskExecutor executor, - final SafeClusterStateTaskListener listener) { + final ClusterStateTaskConfig config, + final ClusterStateTaskExecutor executor, + final SafeClusterStateTaskListener listener) { if (!lifecycle.started()) { return; } @@ -352,7 +391,8 @@ public class InternalClusterService extends AbstractLifecycleComponent pendingTasks() { PrioritizedEsThreadPoolExecutor.Pending[] pendings = updateTasksExecutor.getPending(); List pendingClusterTasks = new ArrayList<>(pendings.length); @@ -386,29 +428,32 @@ public class InternalClusterService extends AbstractLifecycleComponent batchResult; - long startTimeNS = System.nanoTime(); + long startTimeNS = currentTimeInNanos(); try { List inputs = toExecute.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList()); batchResult = executor.execute(previousClusterState, inputs); } catch (Throwable e) { - TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS))); + TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS))); if (logger.isTraceEnabled()) { - StringBuilder sb = new StringBuilder("failed to execute cluster state update in ").append(executionTime).append(", state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n"); - sb.append(previousClusterState.nodes().prettyPrint()); - sb.append(previousClusterState.routingTable().prettyPrint()); - sb.append(previousClusterState.getRoutingNodes().prettyPrint()); - logger.trace(sb.toString(), e); + logger.trace("failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}", e, executionTime, + previousClusterState.version(), source, previousClusterState.nodes().prettyPrint(), + previousClusterState.routingTable().prettyPrint(), previousClusterState.getRoutingNodes().prettyPrint()); } warnAboutSlowTaskIfNeeded(executionTime, source); - batchResult = ClusterStateTaskExecutor.BatchResult.builder().failures(toExecute.stream().map(updateTask -> updateTask.task)::iterator, e).build(previousClusterState); + batchResult = ClusterStateTaskExecutor.BatchResult.builder() + .failures(toExecute.stream().map(updateTask -> updateTask.task)::iterator, e) + .build(previousClusterState); } assert batchResult.executionResults != null; assert batchResult.executionResults.size() == toExecute.size() - : String.format(Locale.ROOT, "expected [%d] task result%s but was [%d]", toExecute.size(), toExecute.size() == 1 ? "" : "s", batchResult.executionResults.size()); + : String.format(Locale.ROOT, "expected [%d] task result%s but was [%d]", toExecute.size(), + toExecute.size() == 1 ? 
"" : "s", batchResult.executionResults.size()); boolean assertsEnabled = false; assert (assertsEnabled = true); if (assertsEnabled) { @@ -494,11 +540,11 @@ public class InternalClusterService extends AbstractLifecycleComponent proccessedListeners.add(updateTask), - ex -> { - logger.debug("cluster state update task [{}] failed", ex, updateTask.source); - updateTask.listener.onFailure(updateTask.source, ex); - } + () -> proccessedListeners.add(updateTask), + ex -> { + logger.debug("cluster state update task [{}] failed", ex, updateTask.source); + updateTask.listener.onFailure(updateTask.source, ex); + } ); } @@ -510,8 +556,8 @@ public class InternalClusterService extends AbstractLifecycleComponent executor, ClusterStateTaskListener listener) { + UpdateTask(String source, T task, ClusterStateTaskConfig config, ClusterStateTaskExecutor executor, + ClusterStateTaskListener listener) { super(config.priority(), source); this.task = task; this.config = config; @@ -778,7 +813,8 @@ public class InternalClusterService extends AbstractLifecycleComponent slowTaskLoggingThreshold.getMillis()) { - logger.warn("cluster state update task [{}] took {} above the warn threshold of {}", source, executionTime, slowTaskLoggingThreshold); + logger.warn("cluster state update task [{}] took [{}] above the warn threshold of {}", source, executionTime, + slowTaskLoggingThreshold); } } @@ -810,64 +846,6 @@ public class InternalClusterService extends AbstractLifecycleComponent failureCount = ConcurrentCollections.newConcurrentMap(); - - @Override - public void run() { - // master node will check against all nodes if its alive with certain discoveries implementations, - // but we can't rely on that, so we check on it as well - for (DiscoveryNode node : clusterState.nodes()) { - if (lifecycle.stoppedOrClosed()) { - return; - } - if (clusterState.nodes().nodeExists(node.id())) { // we double check existence of node since connectToNode might take time... - if (!transportService.nodeConnected(node)) { - try { - transportService.connectToNode(node); - } catch (Exception e) { - if (lifecycle.stoppedOrClosed()) { - return; - } - if (clusterState.nodes().nodeExists(node.id())) { // double check here as well, maybe its gone? - Integer nodeFailureCount = failureCount.get(node); - if (nodeFailureCount == null) { - nodeFailureCount = 1; - } else { - nodeFailureCount = nodeFailureCount + 1; - } - // log every 6th failure - if ((nodeFailureCount % 6) == 0) { - // reset the failure count... 
- nodeFailureCount = 0; - logger.warn("failed to reconnect to node {}", e, node); - } - failureCount.put(node, nodeFailureCount); - } - } - } - } - } - // go over and remove failed nodes that have been removed - DiscoveryNodes nodes = clusterState.nodes(); - for (Iterator failedNodesIt = failureCount.keySet().iterator(); failedNodesIt.hasNext(); ) { - DiscoveryNode failedNode = failedNodesIt.next(); - if (!nodes.nodeExists(failedNode.id())) { - failedNodesIt.remove(); - } - } - if (lifecycle.started()) { - reconnectToNodes = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, this); - } - } - } - - public static String generateNodeId(Settings settings) { - Random random = Randomness.get(settings, NODE_ID_SEED_SETTING); - return Strings.randomBase64UUID(random); - } - private static class LocalNodeMasterListeners implements ClusterStateListener { private final List listeners = new CopyOnWriteArrayList<>(); @@ -971,7 +949,8 @@ public class InternalClusterService extends AbstractLifecycleComponent ackTimeoutCallback; private Throwable lastFailure; - AckCountDownListener(AckedClusterStateTaskListener ackedTaskListener, long clusterStateVersion, DiscoveryNodes nodes, ThreadPool threadPool) { + AckCountDownListener(AckedClusterStateTaskListener ackedTaskListener, long clusterStateVersion, DiscoveryNodes nodes, + ThreadPool threadPool) { this.ackedTaskListener = ackedTaskListener; this.clusterStateVersion = clusterStateVersion; this.nodes = nodes; @@ -1023,4 +1002,8 @@ public class InternalClusterService extends AbstractLifecycleComponent> permissions = new HashMap<>(paths.length); - Map owners = new HashMap<>(paths.length); - Map groups = new HashMap<>(paths.length); - - if (paths != null && paths.length > 0) { - for (Path path : paths) { - try { - boolean supportsPosixPermissions = Environment.getFileStore(path).supportsFileAttributeView(PosixFileAttributeView.class); - if (supportsPosixPermissions) { - PosixFileAttributes attributes = Files.readAttributes(path, PosixFileAttributes.class); - permissions.put(path, attributes.permissions()); - owners.put(path, attributes.owner().getName()); - groups.put(path, attributes.group().getName()); - } - } catch (IOException e) { - // silently swallow if not supported, no need to log things - } - } - } - - CliTool.ExitStatus status = doExecute(settings, env); - - // check if permissions differ - for (Map.Entry> entry : permissions.entrySet()) { - if (!Files.exists(entry.getKey())) { - continue; - } - - Set permissionsBeforeWrite = entry.getValue(); - Set permissionsAfterWrite = Files.getPosixFilePermissions(entry.getKey()); - if (!permissionsBeforeWrite.equals(permissionsAfterWrite)) { - terminal.println(Terminal.Verbosity.SILENT, "WARNING: The file permissions of [" + entry.getKey() + "] have changed " - + "from [" + PosixFilePermissions.toString(permissionsBeforeWrite) + "] " - + "to [" + PosixFilePermissions.toString(permissionsAfterWrite) + "]"); - terminal.println(Terminal.Verbosity.SILENT, "Please ensure that the user account running Elasticsearch has read access to this file!"); - } - } - - // check if owner differs - for (Map.Entry entry : owners.entrySet()) { - if (!Files.exists(entry.getKey())) { - continue; - } - - String ownerBeforeWrite = entry.getValue(); - String ownerAfterWrite = Files.getOwner(entry.getKey()).getName(); - if (!ownerAfterWrite.equals(ownerBeforeWrite)) { - terminal.println(Terminal.Verbosity.SILENT, "WARNING: Owner of file [" + entry.getKey() + "] used to be [" + ownerBeforeWrite + "], but now is [" + 
ownerAfterWrite + "]"); - } - } - - // check if group differs - for (Map.Entry entry : groups.entrySet()) { - if (!Files.exists(entry.getKey())) { - continue; - } - - String groupBeforeWrite = entry.getValue(); - String groupAfterWrite = Files.readAttributes(entry.getKey(), PosixFileAttributes.class).group().getName(); - if (!groupAfterWrite.equals(groupBeforeWrite)) { - terminal.println(Terminal.Verbosity.SILENT, "WARNING: Group of file [" + entry.getKey() + "] used to be [" + groupBeforeWrite + "], but now is [" + groupAfterWrite + "]"); - } - } - - return status; - } -} diff --git a/core/src/main/java/org/elasticsearch/common/cli/CliTool.java b/core/src/main/java/org/elasticsearch/common/cli/CliTool.java deleted file mode 100644 index 2ea01f45068..00000000000 --- a/core/src/main/java/org/elasticsearch/common/cli/CliTool.java +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.cli; - -import org.apache.commons.cli.AlreadySelectedException; -import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.CommandLineParser; -import org.apache.commons.cli.DefaultParser; -import org.apache.commons.cli.MissingArgumentException; -import org.apache.commons.cli.MissingOptionException; -import org.apache.commons.cli.UnrecognizedOptionException; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.node.internal.InternalSettingsPreparer; - -import java.util.Locale; - -import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; - -/** - * A base class for command-line interface tool. - * - * Two modes are supported: - * - * - Single command mode. The tool exposes a single command that can potentially accept arguments (eg. CLI options). - * - Multi command mode. The tool support multiple commands, each for different tasks, each potentially accepts arguments. - * - * In a multi-command mode. The first argument must be the command name. For example, the plugin manager - * can be seen as a multi-command tool with two possible commands: install and uninstall - * - * The tool is configured using a {@link CliToolConfig} which encapsulates the tool's commands and their - * potential options. The tool also comes with out of the box simple help support (the -h/--help option is - * automatically handled) where the help text is configured in a dedicated *.help files located in the same package - * as the tool. 
diff --git a/core/src/main/java/org/elasticsearch/common/cli/CliTool.java b/core/src/main/java/org/elasticsearch/common/cli/CliTool.java deleted file mode 100644 index 2ea01f45068..00000000000 --- a/core/src/main/java/org/elasticsearch/common/cli/CliTool.java +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.cli; - -import org.apache.commons.cli.AlreadySelectedException; -import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.CommandLineParser; -import org.apache.commons.cli.DefaultParser; -import org.apache.commons.cli.MissingArgumentException; -import org.apache.commons.cli.MissingOptionException; -import org.apache.commons.cli.UnrecognizedOptionException; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.node.internal.InternalSettingsPreparer; - -import java.util.Locale; - -import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; - -/** - * A base class for command-line interface tools. - * - * Two modes are supported: - * - * - Single command mode. The tool exposes a single command that can potentially accept arguments (e.g. CLI options). - * - Multi command mode. The tool supports multiple commands, each for a different task, and each potentially accepts arguments. - * - * In multi-command mode, the first argument must be the command name. For example, the plugin manager - * can be seen as a multi-command tool with two possible commands: install and uninstall. - * - * The tool is configured using a {@link CliToolConfig} which encapsulates the tool's commands and their - * potential options. The tool also comes with out of the box simple help support (the -h/--help option is - * automatically handled) where the help text is configured in dedicated *.help files located in the same package - * as the tool.
- */ -public abstract class CliTool { - - // based on sysexits.h - public enum ExitStatus { - OK(0), - OK_AND_EXIT(0), - USAGE(64), /* command line usage error */ - DATA_ERROR(65), /* data format error */ - NO_INPUT(66), /* cannot open input */ - NO_USER(67), /* addressee unknown */ - NO_HOST(68), /* host name unknown */ - UNAVAILABLE(69), /* service unavailable */ - CODE_ERROR(70), /* internal software error */ - CANT_CREATE(73), /* can't create (user) output file */ - IO_ERROR(74), /* input/output error */ - TEMP_FAILURE(75), /* temp failure; user is invited to retry */ - PROTOCOL(76), /* remote error in protocol */ - NOPERM(77), /* permission denied */ - CONFIG(78); /* configuration error */ - - final int status; - - ExitStatus(int status) { - this.status = status; - } - - public int status() { - return status; - } - } - - protected final Terminal terminal; - protected final Environment env; - protected final Settings settings; - - private final CliToolConfig config; - - protected CliTool(CliToolConfig config) { - this(config, Terminal.DEFAULT); - } - - protected CliTool(CliToolConfig config, Terminal terminal) { - if (config.cmds().size() == 0) { - throw new IllegalArgumentException("At least one command must be configured"); - } - this.config = config; - this.terminal = terminal; - env = InternalSettingsPreparer.prepareEnvironment(EMPTY_SETTINGS, terminal); - settings = env.settings(); - } - - public final ExitStatus execute(String... args) throws Exception { - - // first let's see if the user requests tool help. We're doing it only if - // this is a multi-command tool. If it's a single command tool, the -h/--help - // option will be taken care of on the command level - if (!config.isSingle() && args.length > 0 && (args[0].equals("-h") || args[0].equals("--help"))) { - config.printUsage(terminal); - return ExitStatus.OK_AND_EXIT; - } - - CliToolConfig.Cmd cmd; - if (config.isSingle()) { - cmd = config.single(); - } else { - - if (args.length == 0) { - terminal.println(Terminal.Verbosity.SILENT, "ERROR: command not specified"); - config.printUsage(terminal); - return ExitStatus.USAGE; - } - - String cmdName = args[0]; - cmd = config.cmd(cmdName); - if (cmd == null) { - terminal.println(Terminal.Verbosity.SILENT, "ERROR: unknown command [" + cmdName + "].
Use [-h] option to list available commands"); - return ExitStatus.USAGE; - } - - // we now remove the command name from the args - if (args.length == 1) { - args = new String[0]; - } else { - String[] cmdArgs = new String[args.length - 1]; - System.arraycopy(args, 1, cmdArgs, 0, cmdArgs.length); - args = cmdArgs; - } - } - - try { - return parse(cmd, args).execute(settings, env); - } catch (UserError error) { - terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + error.getMessage()); - return error.exitStatus; - } - } - - public Command parse(String cmdName, String[] args) throws Exception { - CliToolConfig.Cmd cmd = config.cmd(cmdName); - return parse(cmd, args); - } - - public Command parse(CliToolConfig.Cmd cmd, String[] args) throws Exception { - CommandLineParser parser = new DefaultParser(); - CommandLine cli = parser.parse(CliToolConfig.OptionsSource.HELP.options(), args, true); - if (cli.hasOption("h")) { - return helpCmd(cmd); - } - try { - cli = parser.parse(cmd.options(), args, cmd.isStopAtNonOption()); - } catch (AlreadySelectedException|MissingArgumentException|MissingOptionException|UnrecognizedOptionException e) { - // intentionally drop the stack trace here as these are really user errors, - // the stack trace into cli parsing lib is not important - throw new UserError(ExitStatus.USAGE, e.toString()); - } - - if (cli.hasOption("v")) { - terminal.setVerbosity(Terminal.Verbosity.VERBOSE); - } else if (cli.hasOption("s")) { - terminal.setVerbosity(Terminal.Verbosity.SILENT); - } else { - terminal.setVerbosity(Terminal.Verbosity.NORMAL); - } - return parse(cmd.name(), cli); - } - - protected Command.Help helpCmd(CliToolConfig.Cmd cmd) { - return new Command.Help(cmd, terminal); - } - - protected static Command.Exit exitCmd(ExitStatus status) { - return new Command.Exit(null, status, null); - } - - protected static Command.Exit exitCmd(ExitStatus status, Terminal terminal, String msg, Object... 
args) { - return new Command.Exit(String.format(Locale.ROOT, msg, args), status, terminal); - } - - protected abstract Command parse(String cmdName, CommandLine cli) throws Exception; - - public static abstract class Command { - - protected final Terminal terminal; - - protected Command(Terminal terminal) { - this.terminal = terminal; - } - - public abstract ExitStatus execute(Settings settings, Environment env) throws Exception; - - public static class Help extends Command { - - private final CliToolConfig.Cmd cmd; - - private Help(CliToolConfig.Cmd cmd, Terminal terminal) { - super(terminal); - this.cmd = cmd; - } - - @Override - public ExitStatus execute(Settings settings, Environment env) throws Exception { - cmd.printUsage(terminal); - return ExitStatus.OK_AND_EXIT; - } - } - - public static class Exit extends Command { - private final String msg; - private final ExitStatus status; - - private Exit(String msg, ExitStatus status, Terminal terminal) { - super(terminal); - this.msg = msg; - this.status = status; - } - - @Override - public ExitStatus execute(Settings settings, Environment env) throws Exception { - if (msg != null) { - if (status != ExitStatus.OK) { - terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + msg); - } else { - terminal.println(msg); - } - } - return status; - } - - public ExitStatus status() { - return status; - } - } - } - - - -} - diff --git a/core/src/main/java/org/elasticsearch/common/cli/CliToolConfig.java b/core/src/main/java/org/elasticsearch/common/cli/CliToolConfig.java deleted file mode 100644 index d0ba897b33d..00000000000 --- a/core/src/main/java/org/elasticsearch/common/cli/CliToolConfig.java +++ /dev/null @@ -1,302 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.common.cli; - -import org.apache.commons.cli.Option; -import org.apache.commons.cli.OptionGroup; -import org.apache.commons.cli.Options; - -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -/** - * - */ -public class CliToolConfig { - - public static Builder config(String name, Class toolType) { - return new Builder(name, toolType); - } - - private final Class toolType; - private final String name; - private final Map cmds; - - private static final HelpPrinter helpPrinter = new HelpPrinter(); - - private CliToolConfig(String name, Class toolType, Cmd[] cmds) { - this.name = name; - this.toolType = toolType; - final Map cmdsMapping = new HashMap<>(); - for (int i = 0; i < cmds.length; i++) { - cmdsMapping.put(cmds[i].name, cmds[i]); - } - this.cmds = Collections.unmodifiableMap(cmdsMapping); - } - - public boolean isSingle() { - return cmds.size() == 1; - } - - public Cmd single() { - assert isSingle() : "Requesting single command on a multi-command tool"; - return cmds.values().iterator().next(); - } - - public Class toolType() { - return toolType; - } - - public String name() { - return name; - } - - public Collection cmds() { - return cmds.values(); - } - - public Cmd cmd(String name) { - return cmds.get(name); - } - - public void printUsage(Terminal terminal) { - helpPrinter.print(this, terminal); - } - - public static class Builder { - - public static Cmd.Builder cmd(String name, Class cmdType) { - return new Cmd.Builder(name, cmdType); - } - - public static OptionBuilder option(String shortName, String longName) { - return new OptionBuilder(shortName, longName); - } - - public static Option.Builder optionBuilder(String shortName, String longName) { - return Option.builder(shortName).argName(longName).longOpt(longName); - } - - public static OptionGroupBuilder optionGroup(boolean required) { - return new OptionGroupBuilder(required); - } - - private final Class toolType; - private final String name; - private Cmd[] cmds; - - private Builder(String name, Class toolType) { - this.name = name; - this.toolType = toolType; - } - - public Builder cmds(Cmd.Builder... cmds) { - this.cmds = new Cmd[cmds.length]; - for (int i = 0; i < cmds.length; i++) { - this.cmds[i] = cmds[i].build(); - this.cmds[i].toolName = name; - } - return this; - } - - public Builder cmds(Cmd... 
cmds) { - for (int i = 0; i < cmds.length; i++) { - cmds[i].toolName = name; - } - this.cmds = cmds; - return this; - } - - public CliToolConfig build() { - return new CliToolConfig(name, toolType, cmds); - } - } - - public static class Cmd { - - private String toolName; - private final String name; - private final Class cmdType; - private final Options options; - private final boolean stopAtNonOption; - - private Cmd(String name, Class cmdType, Options options, boolean stopAtNonOption) { - this.name = name; - this.cmdType = cmdType; - this.options = options; - this.stopAtNonOption = stopAtNonOption; - OptionsSource.VERBOSITY.populate(options); - } - - public Class cmdType() { - return cmdType; - } - - public String name() { - return name; - } - - public Options options() { - return options; - } - - public boolean isStopAtNonOption() { - return stopAtNonOption; - } - - public void printUsage(Terminal terminal) { - helpPrinter.print(toolName, this, terminal); - } - - public static class Builder { - - private final String name; - private final Class cmdType; - private Options options = new Options(); - private boolean stopAtNonOption = false; - - private Builder(String name, Class cmdType) { - this.name = name; - this.cmdType = cmdType; - } - - public Builder options(OptionBuilder... optionBuilder) { - for (int i = 0; i < optionBuilder.length; i++) { - options.addOption(optionBuilder[i].build()); - } - return this; - } - - public Builder options(Option.Builder... optionBuilders) { - for (int i = 0; i < optionBuilders.length; i++) { - options.addOption(optionBuilders[i].build()); - } - return this; - } - - public Builder optionGroups(OptionGroupBuilder... optionGroupBuilders) { - for (OptionGroupBuilder builder : optionGroupBuilders) { - options.addOptionGroup(builder.build()); - } - return this; - } - - /** - * @param stopAtNonOption if true an unrecognized argument stops - * the parsing and the remaining arguments are added to the - * args list. If false an unrecognized - * argument triggers a ParseException. - */ - public Builder stopAtNonOption(boolean stopAtNonOption) { - this.stopAtNonOption = stopAtNonOption; - return this; - } - - public Cmd build() { - return new Cmd(name, cmdType, options, stopAtNonOption); - } - } - } - - public static class OptionBuilder { - - private final Option option; - - private OptionBuilder(String shortName, String longName) { - option = new Option(shortName, ""); - option.setLongOpt(longName); - option.setArgName(longName); - } - - public OptionBuilder required(boolean required) { - option.setRequired(required); - return this; - } - - public OptionBuilder hasArg(boolean optional) { - option.setOptionalArg(optional); - option.setArgs(1); - return this; - } - - public Option build() { - return option; - } - } - - public static class OptionGroupBuilder { - - private OptionGroup group; - - private OptionGroupBuilder(boolean required) { - group = new OptionGroup(); - group.setRequired(required); - } - - public OptionGroupBuilder options(OptionBuilder... 
optionBuilders) { - for (OptionBuilder builder : optionBuilders) { - group.addOption(builder.build()); - } - return this; - } - - public OptionGroup build() { - return group; - } - - } - - static abstract class OptionsSource { - - static final OptionsSource HELP = new OptionsSource() { - - @Override - void populate(Options options) { - options.addOption(new OptionBuilder("h", "help").required(false).build()); - } - }; - - static final OptionsSource VERBOSITY = new OptionsSource() { - @Override - void populate(Options options) { - OptionGroup verbosityGroup = new OptionGroup(); - verbosityGroup.setRequired(false); - verbosityGroup.addOption(new OptionBuilder("s", "silent").required(false).build()); - verbosityGroup.addOption(new OptionBuilder("v", "verbose").required(false).build()); - options.addOptionGroup(verbosityGroup); - } - }; - - private Options options; - - Options options() { - if (options == null) { - options = new Options(); - populate(options); - } - return options; - } - - abstract void populate(Options options); - - } -} diff --git a/core/src/main/java/org/elasticsearch/common/cli/HelpPrinter.java b/core/src/main/java/org/elasticsearch/common/cli/HelpPrinter.java deleted file mode 100644 index ada6cc33a19..00000000000 --- a/core/src/main/java/org/elasticsearch/common/cli/HelpPrinter.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.common.cli; - -import org.elasticsearch.common.io.Streams; -import org.elasticsearch.common.util.Callback; - -import java.io.IOException; -import java.io.InputStream; - -/** - * - */ -public class HelpPrinter { - - private static final String HELP_FILE_EXT = ".help"; - - public void print(CliToolConfig config, Terminal terminal) { - print(config.toolType(), config.name(), terminal); - } - - public void print(String toolName, CliToolConfig.Cmd cmd, Terminal terminal) { - print(cmd.cmdType(), toolName + "-" + cmd.name(), terminal); - } - - private static void print(Class clazz, String name, final Terminal terminal) { - terminal.println(Terminal.Verbosity.SILENT, ""); - try (InputStream input = clazz.getResourceAsStream(name + HELP_FILE_EXT)) { - Streams.readAllLines(input, new Callback() { - @Override - public void handle(String line) { - terminal.println(Terminal.Verbosity.SILENT, line); - } - }); - } catch (IOException ioe) { - throw new RuntimeException(ioe); - } - terminal.println(Terminal.Verbosity.SILENT, ""); - } -} diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java index 97ef6561c9b..658d8ed84c1 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java @@ -36,8 +36,6 @@ public class CircleBuilder extends ShapeBuilder { public static final String FIELD_RADIUS = "radius"; public static final GeoShapeType TYPE = GeoShapeType.CIRCLE; - public static final CircleBuilder PROTOTYPE = new CircleBuilder(); - private DistanceUnit unit = DistanceUnit.DEFAULT; private double radius; private Coordinate center; @@ -50,6 +48,21 @@ public class CircleBuilder extends ShapeBuilder { this.center = ZERO_ZERO; } + /** + * Read from a stream. 
+ */ + public CircleBuilder(StreamInput in) throws IOException { + center(readFromStream(in)); + radius(in.readDouble(), DistanceUnit.readFromStream(in)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + writeCoordinateTo(center, out); + out.writeDouble(radius); + unit.writeTo(out); + } + /** * Set the center of the circle * @@ -170,18 +183,4 @@ public class CircleBuilder extends ShapeBuilder { Objects.equals(radius, other.radius) && Objects.equals(unit.ordinal(), other.unit.ordinal()); } - - @Override - public void writeTo(StreamOutput out) throws IOException { - writeCoordinateTo(center, out); - out.writeDouble(radius); - DistanceUnit.writeDistanceUnit(out, unit); - } - - @Override - public CircleBuilder readFrom(StreamInput in) throws IOException { - return new CircleBuilder() - .center(readCoordinateFrom(in)) - .radius(in.readDouble(), DistanceUnit.readDistanceUnit(in)); - } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/CoordinateCollection.java b/core/src/main/java/org/elasticsearch/common/geo/builders/CoordinateCollection.java index 72ac7be8114..b6b9df45d04 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/CoordinateCollection.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/CoordinateCollection.java @@ -21,9 +21,12 @@ package org.elasticsearch.common.geo.builders; import com.vividsolutions.jts.geom.Coordinate; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.List; @@ -48,6 +51,25 @@ public abstract class CoordinateCollection> ex this.coordinates = coordinates; } + /** + * Read from a stream. + */ + protected CoordinateCollection(StreamInput in) throws IOException { + int size = in.readVInt(); + coordinates = new ArrayList<>(size); + for (int i=0; i < size; i++) { + coordinates.add(readFromStream(in)); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(coordinates.size()); + for (Coordinate point : coordinates) { + writeCoordinateTo(point, out); + } + } + @SuppressWarnings("unchecked") private E thisRef() { return (E)this; diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java index ab997387ea1..5b80ceeeeea 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java @@ -33,11 +33,12 @@ public class EnvelopeBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.ENVELOPE; - public static final EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder(new Coordinate(-1.0, 1.0), new Coordinate(1.0, -1.0)); - - private Coordinate topLeft; - private Coordinate bottomRight; + private final Coordinate topLeft; + private final Coordinate bottomRight;
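The CoordinateCollection hunk above gives every coordinate list the same wire format: a vInt element count followed by fixed-width elements. A hedged sketch of that framing as a standalone helper (the class name is illustrative; only the writeVInt/readVInt and writeDouble/readDouble calls shown in this diff are assumed):

    import com.vividsolutions.jts.geom.Coordinate;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    class CoordinateListCodec {
        // Count first, so the reader can pre-size its list...
        static void write(List<Coordinate> coordinates, StreamOutput out) throws IOException {
            out.writeVInt(coordinates.size());
            for (Coordinate c : coordinates) {
                out.writeDouble(c.x); // ...then each element as two fixed-width doubles,
                out.writeDouble(c.y); // matching writeCoordinateTo above
            }
        }

        static List<Coordinate> read(StreamInput in) throws IOException {
            int size = in.readVInt();
            List<Coordinate> coordinates = new ArrayList<>(size);
            for (int i = 0; i < size; i++) {
                coordinates.add(new Coordinate(in.readDouble(), in.readDouble()));
            }
            return coordinates;
        }
    }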
+ /** + * Build an envelope from the top left and bottom right coordinates. + */ public EnvelopeBuilder(Coordinate topLeft, Coordinate bottomRight) { Objects.requireNonNull(topLeft, "topLeft of envelope cannot be null"); Objects.requireNonNull(bottomRight, "bottomRight of envelope cannot be null"); @@ -45,6 +46,20 @@ public class EnvelopeBuilder extends ShapeBuilder { this.bottomRight = bottomRight; } + /** + * Read from a stream. + */ + public EnvelopeBuilder(StreamInput in) throws IOException { + topLeft = readFromStream(in); + bottomRight = readFromStream(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + writeCoordinateTo(topLeft, out); + writeCoordinateTo(bottomRight, out); + } + public Coordinate topLeft() { return this.topLeft; } @@ -91,15 +106,4 @@ public class EnvelopeBuilder extends ShapeBuilder { return Objects.equals(topLeft, other.topLeft) && Objects.equals(bottomRight, other.bottomRight); } - - @Override - public void writeTo(StreamOutput out) throws IOException { - writeCoordinateTo(topLeft, out); - writeCoordinateTo(bottomRight, out); - } - - @Override - public EnvelopeBuilder readFrom(StreamInput in) throws IOException { - return new EnvelopeBuilder(readCoordinateFrom(in), readCoordinateFrom(in)); - } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java index d21f47cf053..b8559fcb48f 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java @@ -36,9 +36,34 @@ public class GeometryCollectionBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.GEOMETRYCOLLECTION; - public static final GeometryCollectionBuilder PROTOTYPE = new GeometryCollectionBuilder(); + /** + * List of shapes. Package scope for testing. + */ + final List shapes = new ArrayList<>(); - protected final ArrayList shapes = new ArrayList<>(); + /** + * Build an empty GeometryCollectionBuilder. + */ + public GeometryCollectionBuilder() { + } + + /** + * Read from a stream.
+ */ + public GeometryCollectionBuilder(StreamInput in) throws IOException { + int shapes = in.readVInt(); + for (int i = 0; i < shapes; i++) { + shape(in.readShape()); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(shapes.size()); + for (ShapeBuilder shape : shapes) { + out.writeShape(shape); + } + } public GeometryCollectionBuilder shape(ShapeBuilder shape) { this.shapes.add(shape); @@ -146,23 +171,4 @@ public class GeometryCollectionBuilder extends ShapeBuilder { GeometryCollectionBuilder other = (GeometryCollectionBuilder) obj; return Objects.equals(shapes, other.shapes); } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(shapes.size()); - for (ShapeBuilder shape : shapes) { - out.writeShape(shape); - } - } - - @Override - public GeometryCollectionBuilder readFrom(StreamInput in) throws IOException { - GeometryCollectionBuilder geometryCollectionBuilder = new GeometryCollectionBuilder(); - int shapes = in.readVInt(); - for (int i = 0; i < shapes; i++) { - geometryCollectionBuilder.shape(in.readShape()); - } - return geometryCollectionBuilder; - } - } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java index cbc9002c785..e79578d9ab2 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java @@ -19,15 +19,14 @@ package org.elasticsearch.common.geo.builders; -import org.locationtech.spatial4j.shape.Shape; import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Geometry; import com.vividsolutions.jts.geom.GeometryFactory; import com.vividsolutions.jts.geom.LineString; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.locationtech.spatial4j.shape.Shape; import java.io.IOException; import java.util.ArrayList; @@ -36,6 +35,7 @@ import java.util.List; import java.util.Objects; public class LineStringBuilder extends CoordinateCollection { + public static final GeoShapeType TYPE = GeoShapeType.LINESTRING; /** * Construct a new LineString. @@ -55,9 +55,12 @@ public class LineStringBuilder extends CoordinateCollection { this(coordinates.build()); } - public static final GeoShapeType TYPE = GeoShapeType.LINESTRING; - - public static final LineStringBuilder PROTOTYPE = new LineStringBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).coordinate(1.0, 1.0)); + /** + * Read from a stream. 
+ */ + public LineStringBuilder(StreamInput in) throws IOException { + super(in); + } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { @@ -182,23 +185,4 @@ public class LineStringBuilder extends CoordinateCollection { LineStringBuilder other = (LineStringBuilder) obj; return Objects.equals(coordinates, other.coordinates); } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(coordinates.size()); - for (Coordinate point : coordinates) { - writeCoordinateTo(point, out); - } - } - - @Override - public LineStringBuilder readFrom(StreamInput in) throws IOException { - CoordinatesBuilder coordinates = new CoordinatesBuilder(); - int size = in.readVInt(); - for (int i=0; i < size; i++) { - coordinates.coordinate(readCoordinateFrom(in)); - } - LineStringBuilder lineStringBuilder = new LineStringBuilder(coordinates); - return lineStringBuilder; - } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java index 51f4fd232c5..04e25862c8b 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java @@ -37,10 +37,29 @@ public class MultiLineStringBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.MULTILINESTRING; - public static final MultiLineStringBuilder PROTOTYPE = new MultiLineStringBuilder(); - private final ArrayList lines = new ArrayList<>(); + public MultiLineStringBuilder() { + } + + /** + * Read from a stream. + */ + public MultiLineStringBuilder(StreamInput in) throws IOException { + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + linestring(new LineStringBuilder(in)); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(lines.size()); + for (LineStringBuilder line : lines) { + line.writeTo(out); + } + } + public MultiLineStringBuilder linestring(LineStringBuilder line) { this.lines.add(line); return this; @@ -114,22 +133,4 @@ public class MultiLineStringBuilder extends ShapeBuilder { MultiLineStringBuilder other = (MultiLineStringBuilder) obj; return Objects.equals(lines, other.lines); } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(lines.size()); - for (LineStringBuilder line : lines) { - line.writeTo(out); - } - } - - @Override - public MultiLineStringBuilder readFrom(StreamInput in) throws IOException { - MultiLineStringBuilder multiLineStringBuilder = new MultiLineStringBuilder(); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - multiLineStringBuilder.linestring(LineStringBuilder.PROTOTYPE.readFrom(in)); - } - return multiLineStringBuilder; - } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java index b8f2c8137ef..f8a06244362 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java @@ -19,14 +19,13 @@ package org.elasticsearch.common.geo.builders; -import org.locationtech.spatial4j.shape.Point; -import org.locationtech.spatial4j.shape.Shape; import com.vividsolutions.jts.geom.Coordinate; import org.elasticsearch.common.geo.XShapeCollection; 
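All of the size prefixes in these hunks go through writeVInt/readVInt, a variable-length integer encoding: seven payload bits per byte, with the high bit flagging a continuation, so small counts cost a single byte. A sketch of that encoding style in plain Java (illustrative only, not Elasticsearch's exact implementation):

    import java.io.*;

    class VInt {
        static void write(OutputStream out, int value) throws IOException {
            while ((value & ~0x7F) != 0) {        // more than 7 bits remain
                out.write((value & 0x7F) | 0x80); // low 7 bits, continuation bit set
                value >>>= 7;
            }
            out.write(value);                     // final byte, continuation bit clear
        }

        static int read(InputStream in) throws IOException {
            int value = 0;
            for (int shift = 0; ; shift += 7) {
                int b = in.read();
                if (b < 0) {
                    throw new EOFException("truncated vInt");
                }
                value |= (b & 0x7F) << shift;
                if ((b & 0x80) == 0) {            // continuation bit clear: done
                    return value;
                }
            }
        }
    }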
import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.locationtech.spatial4j.shape.Point; +import org.locationtech.spatial4j.shape.Shape; import java.io.IOException; import java.util.ArrayList; @@ -37,8 +36,6 @@ public class MultiPointBuilder extends CoordinateCollection { public static final GeoShapeType TYPE = GeoShapeType.MULTIPOINT; - public static final MultiPointBuilder PROTOTYPE = new MultiPointBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).build()); - /** * Create a new {@link MultiPointBuilder}. * @param coordinates needs at least two coordinates to be valid, otherwise will throw an exception @@ -47,6 +44,13 @@ public class MultiPointBuilder extends CoordinateCollection { super(coordinates); } + /** + * Read from a stream. + */ + public MultiPointBuilder(StreamInput in) throws IOException { + super(in); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -91,24 +95,4 @@ public class MultiPointBuilder extends CoordinateCollection { MultiPointBuilder other = (MultiPointBuilder) obj; return Objects.equals(coordinates, other.coordinates); } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(coordinates.size()); - for (Coordinate point : coordinates) { - writeCoordinateTo(point, out); - } - } - - @Override - public MultiPointBuilder readFrom(StreamInput in) throws IOException { - int size = in.readVInt(); - List points = new ArrayList(size); - for (int i=0; i < size; i++) { - points.add(readCoordinateFrom(in)); - } - MultiPointBuilder multiPointBuilder = new MultiPointBuilder(points); - - return multiPointBuilder; - } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java index 6ee679b7308..f5e5bca5051 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java @@ -36,20 +36,45 @@ import java.util.Objects; public class MultiPolygonBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.MULTIPOLYGON; - public static final MultiPolygonBuilder PROTOTYPE = new MultiPolygonBuilder(); - private final ArrayList polygons = new ArrayList<>(); + private final List polygons = new ArrayList<>(); - private Orientation orientation = Orientation.RIGHT; + private final Orientation orientation; + /** + * Build a MultiPolygonBuilder with RIGHT orientation. + */ public MultiPolygonBuilder() { this(Orientation.RIGHT); } + /** + * Build a MultiPolygonBuilder with an arbitrary orientation. + */ public MultiPolygonBuilder(Orientation orientation) { this.orientation = orientation; } + /** + * Read from a stream. 
+ */ + public MultiPolygonBuilder(StreamInput in) throws IOException { + orientation = Orientation.readFrom(in); + int holes = in.readVInt(); + for (int i = 0; i < holes; i++) { + polygon(new PolygonBuilder(in)); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + orientation.writeTo(out); + out.writeVInt(polygons.size()); + for (PolygonBuilder polygon : polygons) { + polygon.writeTo(out); + } + } + public Orientation orientation() { return this.orientation; } @@ -70,7 +95,7 @@ public class MultiPolygonBuilder extends ShapeBuilder { /** * get the list of polygons */ - public ArrayList polygons() { + public List polygons() { return polygons; } @@ -134,23 +159,4 @@ public class MultiPolygonBuilder extends ShapeBuilder { return Objects.equals(polygons, other.polygons) && Objects.equals(orientation, other.orientation); } - - @Override - public void writeTo(StreamOutput out) throws IOException { - orientation.writeTo(out); - out.writeVInt(polygons.size()); - for (PolygonBuilder polygon : polygons) { - polygon.writeTo(out); - } - } - - @Override - public MultiPolygonBuilder readFrom(StreamInput in) throws IOException { - MultiPolygonBuilder polyBuilder = new MultiPolygonBuilder(Orientation.readFrom(in)); - int holes = in.readVInt(); - for (int i = 0; i < holes; i++) { - polyBuilder.polygon(PolygonBuilder.PROTOTYPE.readFrom(in)); - } - return polyBuilder; - } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java index 30b7e370f22..fdd9826410a 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java @@ -30,9 +30,7 @@ import java.io.IOException; import java.util.Objects; public class PointBuilder extends ShapeBuilder { - public static final GeoShapeType TYPE = GeoShapeType.POINT; - public static final PointBuilder PROTOTYPE = new PointBuilder(); private Coordinate coordinate; @@ -43,6 +41,18 @@ public class PointBuilder extends ShapeBuilder { this.coordinate = ZERO_ZERO; } + /** + * Read from a stream. 
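One contract every one of these stream constructors shares: it must consume fields in exactly the order the paired writeTo produces them, because the wire format carries no field names or tags. MultiPolygonBuilder above writes orientation, then the polygon count, then each polygon, and its constructor reads the same sequence back. A sketch of the contract with a hypothetical two-field builder (stream API as in this diff; the builder itself is invented for illustration):

    // The constructor is the mirror image of writeTo; reordering either side
    // silently corrupts every field that follows on the stream.
    public HypotheticalBuilder(StreamInput in) throws IOException {
        orientation = Orientation.readFrom(in); // 1st read matches 1st write
        radius = in.readDouble();               // 2nd read matches 2nd write
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        orientation.writeTo(out);               // 1st
        out.writeDouble(radius);                // 2nd
    }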
+ */ + public PointBuilder(StreamInput in) throws IOException { + coordinate = readFromStream(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + writeCoordinateTo(coordinate, out); + } + public PointBuilder coordinate(Coordinate coordinate) { this.coordinate = coordinate; return this; @@ -91,14 +101,4 @@ public class PointBuilder extends ShapeBuilder { PointBuilder other = (PointBuilder) obj; return Objects.equals(coordinate, other.coordinate); } - - @Override - public void writeTo(StreamOutput out) throws IOException { - writeCoordinateTo(coordinate, out); - } - - @Override - public PointBuilder readFrom(StreamInput in) throws IOException { - return new PointBuilder().coordinate(readCoordinateFrom(in)); - } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java index 4a9c8441072..9fad4fb8ef3 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java @@ -53,8 +53,6 @@ import java.util.concurrent.atomic.AtomicBoolean; public class PolygonBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.POLYGON; - public static final PolygonBuilder PROTOTYPE = new PolygonBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).coordinate(0.0, 1.0) - .coordinate(1.0, 0.0).coordinate(0.0, 0.0)); private static final Coordinate[][] EMPTY = new Coordinate[0][]; @@ -64,7 +62,7 @@ public class PolygonBuilder extends ShapeBuilder { private LineStringBuilder shell; // List of line strings defining the holes of the polygon - private final ArrayList holes = new ArrayList<>(); + private final List holes = new ArrayList<>(); public PolygonBuilder(LineStringBuilder lineString, Orientation orientation, boolean coerce) { this.orientation = orientation; @@ -87,6 +85,28 @@ public class PolygonBuilder extends ShapeBuilder { this(coordinates, Orientation.RIGHT); } + /** + * Read from a stream. + */ + public PolygonBuilder(StreamInput in) throws IOException { + shell = new LineStringBuilder(in); + orientation = Orientation.readFrom(in); + int holes = in.readVInt(); + for (int i = 0; i < holes; i++) { + hole(new LineStringBuilder(in)); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + shell.writeTo(out); + orientation.writeTo(out); + out.writeVInt(holes.size()); + for (LineStringBuilder hole : holes) { + hole.writeTo(out); + } + } + public Orientation orientation() { return this.orientation; } @@ -314,7 +334,7 @@ public class PolygonBuilder extends ShapeBuilder { double shiftOffset = any.coordinate.x > DATELINE ? DATELINE : (any.coordinate.x < -DATELINE ? 
-DATELINE : 0); if (debugEnabled()) { - LOGGER.debug("shift: {[]}", shiftOffset); + LOGGER.debug("shift: [{}]", shiftOffset); } // run along the border of the component, collect the @@ -383,18 +403,18 @@ public class PolygonBuilder extends ShapeBuilder { return coordinates; } - private static Coordinate[][][] buildCoordinates(ArrayList> components) { + private static Coordinate[][][] buildCoordinates(List> components) { Coordinate[][][] result = new Coordinate[components.size()][][]; for (int i = 0; i < result.length; i++) { - ArrayList component = components.get(i); + List component = components.get(i); result[i] = component.toArray(new Coordinate[component.size()][]); } if(debugEnabled()) { for (int i = 0; i < result.length; i++) { - LOGGER.debug("Component {[]}:", i); + LOGGER.debug("Component [{}]:", i); for (int j = 0; j < result[i].length; j++) { - LOGGER.debug("\t" + Arrays.toString(result[i][j])); + LOGGER.debug("\t{}", Arrays.toString(result[i][j])); } } } @@ -416,13 +436,13 @@ public class PolygonBuilder extends ShapeBuilder { return points; } - private static Edge[] edges(Edge[] edges, int numHoles, ArrayList> components) { + private static Edge[] edges(Edge[] edges, int numHoles, List> components) { ArrayList mainEdges = new ArrayList<>(edges.length); for (int i = 0; i < edges.length; i++) { if (edges[i].component >= 0) { int length = component(edges[i], -(components.size()+numHoles+1), mainEdges); - ArrayList component = new ArrayList<>(); + List component = new ArrayList<>(); component.add(coordinates(edges[i], new Coordinate[length+1])); components.add(component); } @@ -432,19 +452,19 @@ public class PolygonBuilder extends ShapeBuilder { } private static Coordinate[][][] compose(Edge[] edges, Edge[] holes, int numHoles) { - final ArrayList> components = new ArrayList<>(); + final List> components = new ArrayList<>(); assign(holes, holes(holes, numHoles), numHoles, edges(edges, numHoles, components), components); return buildCoordinates(components); } - private static void assign(Edge[] holes, Coordinate[][] points, int numHoles, Edge[] edges, ArrayList> components) { + private static void assign(Edge[] holes, Coordinate[][] points, int numHoles, Edge[] edges, List> components) { // Assign Hole to related components // To find the new component the hole belongs to all intersections of the // polygon edges with a vertical line are calculated. This vertical line // is an arbitrary point of the hole. The polygon edge next to this point // is part of the polygon the hole belongs to. 
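Alongside the serialization work, the debug statements in this method are being cleaned up: the malformed "{[]}" placeholders become "[{}]", and string concatenation moves to parameterized messages. The template form avoids building the final message string when the event is disabled; argument expressions such as Arrays.toString(...) are still evaluated eagerly, which is presumably why these calls also stay behind the debugEnabled() checks. The pattern, sketched:

    // Concatenation builds the message string even when debug logging is off:
    LOGGER.debug("Holes: " + Arrays.toString(holes));

    // The parameterized form defers formatting until the logger accepts the event:
    LOGGER.debug("Holes: {}", Arrays.toString(holes));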
if (debugEnabled()) { - LOGGER.debug("Holes: " + Arrays.toString(holes)); + LOGGER.debug("Holes: {}", Arrays.toString(holes)); } for (int i = 0; i < numHoles; i++) { final Edge current = new Edge(holes[i].coordinate, holes[i].next); @@ -464,9 +484,9 @@ public class PolygonBuilder extends ShapeBuilder { final int component = -edges[index].component - numHoles - 1; if(debugEnabled()) { - LOGGER.debug("\tposition ("+index+") of edge "+current+": " + edges[index]); - LOGGER.debug("\tComponent: " + component); - LOGGER.debug("\tHole intersections ("+current.coordinate.x+"): " + Arrays.toString(edges)); + LOGGER.debug("\tposition ({}) of edge {}: {}", index, current, edges[index]); + LOGGER.debug("\tComponent: {}", component); + LOGGER.debug("\tHole intersections ({}): {}", current.coordinate.x, Arrays.toString(edges)); } components.get(component).add(points[i]); @@ -668,8 +688,8 @@ public class PolygonBuilder extends ShapeBuilder { * number of points to use * @return the edges creates */ - private static Edge[] concat(int component, boolean direction, Coordinate[] points, final int pointOffset, Edge[] edges, final int edgeOffset, - int length) { + private static Edge[] concat(int component, boolean direction, Coordinate[] points, final int pointOffset, Edge[] edges, + final int edgeOffset, int length) { assert edges.length >= length+edgeOffset; assert points.length >= length+pointOffset; edges[edgeOffset] = new Edge(points[pointOffset], null); @@ -725,26 +745,4 @@ public class PolygonBuilder extends ShapeBuilder { Objects.equals(holes, other.holes) && Objects.equals(orientation, other.orientation); } - - @Override - public void writeTo(StreamOutput out) throws IOException { - shell.writeTo(out); - orientation.writeTo(out); - out.writeVInt(holes.size()); - for (LineStringBuilder hole : holes) { - hole.writeTo(out); - } - } - - @Override - public PolygonBuilder readFrom(StreamInput in) throws IOException { - LineStringBuilder shell = LineStringBuilder.PROTOTYPE.readFrom(in); - Orientation orientation = Orientation.readFrom(in); - PolygonBuilder polyBuilder = new PolygonBuilder(shell, orientation); - int holes = in.readVInt(); - for (int i = 0; i < holes; i++) { - polyBuilder.hole(LineStringBuilder.PROTOTYPE.readFrom(in)); - } - return polyBuilder; - } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java index d0c73964575..a0d77d004da 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java @@ -180,7 +180,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri out.writeDouble(coordinate.y); } - protected Coordinate readCoordinateFrom(StreamInput in) throws IOException { + protected static Coordinate readFromStream(StreamInput in) throws IOException { return new Coordinate(in.readDouble(), in.readDouble()); } @@ -519,7 +519,8 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri } else if (geometryCollections == null && GeoShapeType.GEOMETRYCOLLECTION == shapeType) { throw new ElasticsearchParseException("geometries not included"); } else if (radius != null && GeoShapeType.CIRCLE != shapeType) { - throw new ElasticsearchParseException("field [{}] is supported for [{}] only", CircleBuilder.FIELD_RADIUS, CircleBuilder.TYPE); + throw new ElasticsearchParseException("field [{}] is supported for [{}] only", 
CircleBuilder.FIELD_RADIUS, + CircleBuilder.TYPE); } switch (shapeType) { @@ -539,7 +540,8 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri protected static void validatePointNode(CoordinateNode node) { if (node.isEmpty()) { - throw new ElasticsearchParseException("invalid number of points (0) provided when expecting a single coordinate ([lat, lng])"); + throw new ElasticsearchParseException( + "invalid number of points (0) provided when expecting a single coordinate ([lat, lng])"); } else if (node.coordinate == null) { if (node.children.isEmpty() == false) { throw new ElasticsearchParseException("multipoint data provided when single point data expected."); @@ -559,8 +561,9 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri protected static EnvelopeBuilder parseEnvelope(CoordinateNode coordinates) { // validate the coordinate array for envelope type if (coordinates.children.size() != 2) { - throw new ElasticsearchParseException("invalid number of points [{}] provided for " + - "geo_shape [{}] when expecting an array of 2 coordinates", coordinates.children.size(), GeoShapeType.ENVELOPE.shapename); + throw new ElasticsearchParseException( + "invalid number of points [{}] provided for geo_shape [{}] when expecting an array of 2 coordinates", + coordinates.children.size(), GeoShapeType.ENVELOPE.shapename); } // verify coordinate bounds, correct if necessary Coordinate uL = coordinates.children.get(0).coordinate; @@ -604,7 +607,8 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri * LineStringBuilder should throw a graceful exception if < 2 coordinates/points are provided */ if (coordinates.children.size() < 2) { - throw new ElasticsearchParseException("invalid number of points in LineString (found [{}] - must be >= 2)", coordinates.children.size()); + throw new ElasticsearchParseException("invalid number of points in LineString (found [{}] - must be >= 2)", + coordinates.children.size()); } CoordinatesBuilder line = new CoordinatesBuilder(); @@ -636,10 +640,10 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri throw new ElasticsearchParseException(error); } - int numValidPts; - if (coordinates.children.size() < (numValidPts = (coerce) ? 3 : 4)) { - throw new ElasticsearchParseException("invalid number of points in LinearRing (found [{}] - must be >= " + numValidPts + ")(", - coordinates.children.size()); + int numValidPts = coerce ? 3 : 4; + if (coordinates.children.size() < numValidPts) { + throw new ElasticsearchParseException("invalid number of points in LinearRing (found [{}] - must be >= [{}])", + coordinates.children.size(), numValidPts); } if (!coordinates.children.get(0).coordinate.equals( @@ -655,7 +659,8 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri protected static PolygonBuilder parsePolygon(CoordinateNode coordinates, final Orientation orientation, final boolean coerce) { if (coordinates.children == null || coordinates.children.isEmpty()) { - throw new ElasticsearchParseException("invalid LinearRing provided for type polygon. Linear ring must be an array of coordinates"); + throw new ElasticsearchParseException( + "invalid LinearRing provided for type polygon. 
Linear ring must be an array of coordinates"); } LineStringBuilder shell = parseLinearRing(coordinates.children.get(0), coerce); diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java index 1c828814431..5194510bcfb 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java @@ -21,6 +21,8 @@ package org.elasticsearch.common.geo.builders; import com.vividsolutions.jts.geom.Coordinate; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; + import java.util.List; /** @@ -137,4 +139,16 @@ public class ShapeBuilders { public static EnvelopeBuilder newEnvelope(Coordinate topLeft, Coordinate bottomRight) { return new EnvelopeBuilder(topLeft, bottomRight); } + + public static void register(NamedWriteableRegistry namedWriteableRegistry) { + namedWriteableRegistry.register(ShapeBuilder.class, PointBuilder.TYPE.shapeName(), PointBuilder::new); + namedWriteableRegistry.register(ShapeBuilder.class, CircleBuilder.TYPE.shapeName(), CircleBuilder::new); + namedWriteableRegistry.register(ShapeBuilder.class, EnvelopeBuilder.TYPE.shapeName(), EnvelopeBuilder::new); + namedWriteableRegistry.register(ShapeBuilder.class, MultiPointBuilder.TYPE.shapeName(), MultiPointBuilder::new); + namedWriteableRegistry.register(ShapeBuilder.class, LineStringBuilder.TYPE.shapeName(), LineStringBuilder::new); + namedWriteableRegistry.register(ShapeBuilder.class, MultiLineStringBuilder.TYPE.shapeName(), MultiLineStringBuilder::new); + namedWriteableRegistry.register(ShapeBuilder.class, PolygonBuilder.TYPE.shapeName(), PolygonBuilder::new); + namedWriteableRegistry.register(ShapeBuilder.class, MultiPolygonBuilder.TYPE.shapeName(), MultiPolygonBuilder::new); + namedWriteableRegistry.register(ShapeBuilder.class, GeometryCollectionBuilder.TYPE.shapeName(), GeometryCollectionBuilder::new); + } } diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java index 5f3bd011dd9..b8132b4e870 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java @@ -59,6 +59,11 @@ public abstract class FilterStreamInput extends StreamInput { delegate.close(); } + @Override + public int available() throws IOException { + return delegate.available(); + } + @Override public Version getVersion() { return delegate.getVersion(); diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/InputStreamStreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/InputStreamStreamInput.java index e9aa52cf4d0..d786041af49 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/InputStreamStreamInput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/InputStreamStreamInput.java @@ -74,6 +74,11 @@ public class InputStreamStreamInput extends StreamInput { is.close(); } + @Override + public int available() throws IOException { + return is.available(); + } + @Override public int read() throws IOException { return is.read(); diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableAwareStreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableAwareStreamInput.java index a6d17089652..c683573df7a 100644 --- 
a/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableAwareStreamInput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableAwareStreamInput.java @@ -36,7 +36,12 @@ public class NamedWriteableAwareStreamInput extends FilterStreamInput { @Override C readNamedWriteable(Class categoryClass) throws IOException { String name = readString(); - NamedWriteable namedWriteable = namedWriteableRegistry.getPrototype(categoryClass, name); - return namedWriteable.readFrom(this); + Writeable.Reader reader = namedWriteableRegistry.getReader(categoryClass, name); + C c = reader.read(this); + if (c == null) { + throw new IOException( + "Writeable.Reader [" + reader + "] returned null which is not allowed and probably means it screwed up the stream."); + } + return c; } } diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableRegistry.java b/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableRegistry.java index 42014786749..5a3de923bde 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableRegistry.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableRegistry.java @@ -31,54 +31,70 @@ public class NamedWriteableRegistry { private final Map, InnerRegistry> registry = new HashMap<>(); /** - * Registers a {@link NamedWriteable} prototype given its category + * Register a {@link NamedWriteable} given its category, its name, and a function to read it from the stream. + * + * This method suppresses the rawtypes warning because it is intentionally using the raw NamedWriteable type instead of a parameterized {@code NamedWriteable} so it + * is easier to use and because we might be able to drop the type parameter from NamedWriteable entirely some day. */ - public synchronized void registerPrototype(Class categoryClass, NamedWriteable namedWriteable) { + @SuppressWarnings("rawtypes") + public synchronized void register(Class categoryClass, String name, + Writeable.Reader reader) { @SuppressWarnings("unchecked") - InnerRegistry innerRegistry = (InnerRegistry)registry.get(categoryClass); + InnerRegistry innerRegistry = (InnerRegistry) registry.get(categoryClass); if (innerRegistry == null) { innerRegistry = new InnerRegistry<>(categoryClass); registry.put(categoryClass, innerRegistry); } - innerRegistry.registerPrototype(namedWriteable); + innerRegistry.register(name, reader); + }
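With readers replacing prototypes, registration pairs a name with a constructor reference, and lookup returns something that can be applied directly to the stream. A hedged sketch of both sides using only the API in this hunk (generics loosened as in the code above; the registry's no-arg constructor and the point registration mirroring ShapeBuilders.register are assumptions):

    NamedWriteableRegistry registry = new NamedWriteableRegistry();
    registry.register(ShapeBuilder.class, PointBuilder.TYPE.shapeName(), PointBuilder::new);

    // Reading side, as NamedWriteableAwareStreamInput#readNamedWriteable does it:
    // the name travels first, then the payload the registered reader consumes.
    String name = in.readString();
    Writeable.Reader reader = registry.getReader(ShapeBuilder.class, name);
    ShapeBuilder shape = (ShapeBuilder) reader.read(in);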
+ + /** + * Registers a {@link NamedWriteable} prototype given its category. + * @deprecated Prefer {@link #register(Class, String, org.elasticsearch.common.io.stream.Writeable.Reader)} + */ + @Deprecated + @SuppressWarnings("rawtypes") // TODO remove this method entirely before 5.0.0 GA + public synchronized void registerPrototype(Class categoryClass, + NamedWriteable namedWriteable) { + register(categoryClass, namedWriteable.getWriteableName(), namedWriteable::readFrom); } /** * Returns a reader for the {@link NamedWriteable} object identified by the name provided as argument and its category */ - public synchronized NamedWriteable getPrototype(Class categoryClass, String name) { + public synchronized Writeable.Reader getReader(Class categoryClass, String name) { @@SuppressWarnings("unchecked") InnerRegistry innerRegistry = (InnerRegistry)registry.get(categoryClass); if (innerRegistry == null) { throw new IllegalArgumentException("unknown named writeable category [" + categoryClass.getName() + "]"); } - return innerRegistry.getPrototype(name); + return innerRegistry.getReader(name); } private static class InnerRegistry { - private final Map> registry = new HashMap<>(); + private final Map> registry = new HashMap<>(); private final Class categoryClass; private InnerRegistry(Class categoryClass) { this.categoryClass = categoryClass; } - private void registerPrototype(NamedWriteable namedWriteable) { - NamedWriteable existingNamedWriteable = registry.get(namedWriteable.getWriteableName()); - if (existingNamedWriteable != null) { - throw new IllegalArgumentException("named writeable of type [" + namedWriteable.getClass().getName() + "] with name [" + namedWriteable.getWriteableName() + "] " + - "is already registered by type [" + existingNamedWriteable.getClass().getName() + "] within category [" + categoryClass.getName() + "]"); + private void register(String name, Writeable.Reader reader) { + Writeable.Reader existingReader = registry.get(name); + if (existingReader != null) { + throw new IllegalArgumentException( + "named writeable [" + categoryClass.getName() + "][" + name + "] is already registered by [" + existingReader + "]"); } - registry.put(namedWriteable.getWriteableName(), namedWriteable); + registry.put(name, reader); } - private NamedWriteable getPrototype(String name) { - NamedWriteable namedWriteable = registry.get(name); - if (namedWriteable == null) { - throw new IllegalArgumentException("unknown named writeable with name [" + name + "] within category [" + categoryClass.getName() + "]"); + private Writeable.Reader getReader(String name) { + Writeable.Reader reader = registry.get(name); + if (reader == null) { + throw new IllegalArgumentException("unknown named writeable [" + categoryClass.getName() + "][" + name + "]"); } - return namedWriteable; + return reader; } } } diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 8eda42ae9be..a5750fcc542 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -37,10 +37,13 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; -import org.elasticsearch.search.rescore.RescoreBuilder; -import org.elasticsearch.tasks.Task; import org.elasticsearch.search.aggregations.AggregatorBuilder; import
org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder; +import org.elasticsearch.search.rescore.RescoreBuilder; +import org.elasticsearch.search.sort.SortBuilder; +import org.elasticsearch.search.suggest.SuggestionBuilder; +import org.elasticsearch.search.suggest.phrase.SmoothingModel; +import org.elasticsearch.tasks.Task; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -282,6 +285,14 @@ public abstract class StreamInput extends InputStream { return null; } + @Nullable + public Float readOptionalFloat() throws IOException { + if (readBoolean()) { + return readFloat(); + } + return null; + } + @Nullable public Integer readOptionalVInt() throws IOException { if (readBoolean()) { @@ -362,6 +373,9 @@ public abstract class StreamInput extends InputStream { @Override public abstract void close() throws IOException; + @Override + public abstract int available() throws IOException; + public String[] readStringArray() throws IOException { int size = readVInt(); if (size == 0) { @@ -552,6 +566,19 @@ public abstract class StreamInput extends InputStream { } } + public T readOptionalWriteable(Writeable.Reader reader) throws IOException { + if (readBoolean()) { + T t = reader.read(this); + if (t == null) { + throw new IOException("Writeable.Reader [" + reader + + "] returned null which is not allowed and probably means it screwed up the stream."); + } + return t; + } else { + return null; + } + } + public T readThrowable() throws IOException { if (readBoolean()) { int key = readVInt(); @@ -666,24 +693,34 @@ public abstract class StreamInput extends InputStream { /** * Reads a {@link AggregatorBuilder} from the current stream */ - public AggregatorBuilder readAggregatorFactory() throws IOException { + public AggregatorBuilder readAggregatorFactory() throws IOException { return readNamedWriteable(AggregatorBuilder.class); } /** * Reads a {@link PipelineAggregatorBuilder} from the current stream */ - public PipelineAggregatorBuilder readPipelineAggregatorFactory() throws IOException { + public PipelineAggregatorBuilder readPipelineAggregatorFactory() throws IOException { return readNamedWriteable(PipelineAggregatorBuilder.class); } /** * Reads a {@link QueryBuilder} from the current stream */ - public QueryBuilder readQuery() throws IOException { + public QueryBuilder readQuery() throws IOException { return readNamedWriteable(QueryBuilder.class); } + /** + * Reads an optional {@link QueryBuilder}. 
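readOptionalWriteable and the writeOptionalWriteable/writeOptionalFloat counterparts added to StreamOutput below all share one frame: a boolean presence flag, then the payload only when the flag is true. A sketch of the round trip using just the methods in this diff (Payload is a hypothetical Writeable with a stream constructor, named here purely for illustration):

    // Writing side: a single false byte for null, otherwise true + the payload.
    out.writeOptionalWriteable(payload);        // payload may be null
    out.writeOptionalFloat(boost);              // same frame for boxed primitives

    // Reading side: the constructor reference is only invoked when the flag was true.
    Payload payload = in.readOptionalWriteable(Payload::new); // null if absent
    Float boost = in.readOptionalFloat();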
+ */ + public QueryBuilder readOptionalQuery() throws IOException { + if (readBoolean()) { + return readNamedWriteable(QueryBuilder.class); + } + return null; + } + /** * Reads a {@link ShapeBuilder} from the current stream */ @@ -698,6 +735,20 @@ public abstract class StreamInput extends InputStream { return readNamedWriteable(RescoreBuilder.class); } + /** + * Reads a {@link SuggestionBuilder} from the current stream + */ + public SuggestionBuilder readSuggestion() throws IOException { + return readNamedWriteable(SuggestionBuilder.class); + } + + /** + * Reads a {@link SortBuilder} from the current stream + */ + public SortBuilder readSortBuilder() throws IOException { + return readNamedWriteable(SortBuilder.class); + } + /** * Reads a {@link org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder} from the current stream */ @@ -705,6 +756,13 @@ public abstract class StreamInput extends InputStream { return readNamedWriteable(ScoreFunctionBuilder.class); } + /** + * Reads a {@link SmoothingModel} from the current stream + */ + public SmoothingModel readPhraseSuggestionSmoothingModel() throws IOException { + return readNamedWriteable(SmoothingModel.class); + } + /** * Reads a {@link Task.Status} from the current stream. */ diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 864da006bf0..6b7607a3e70 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -36,10 +36,13 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; -import org.elasticsearch.search.rescore.RescoreBuilder; -import org.elasticsearch.tasks.Task; import org.elasticsearch.search.aggregations.AggregatorBuilder; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder; +import org.elasticsearch.search.rescore.RescoreBuilder; +import org.elasticsearch.search.sort.SortBuilder; +import org.elasticsearch.search.suggest.SuggestionBuilder; +import org.elasticsearch.search.suggest.phrase.SmoothingModel; +import org.elasticsearch.tasks.Task; import org.joda.time.ReadableInstant; import java.io.EOFException; @@ -238,6 +241,15 @@ public abstract class StreamOutput extends OutputStream { } } + public void writeOptionalFloat(@Nullable Float floatValue) throws IOException { + if (floatValue == null) { + writeBoolean(false); + } else { + writeBoolean(true); + writeFloat(floatValue); + } + } + public void writeOptionalText(@Nullable Text text) throws IOException { if (text == null) { writeInt(-1); @@ -520,6 +532,15 @@ public abstract class StreamOutput extends OutputStream { } } + public void writeOptionalWriteable(@Nullable Writeable writeable) throws IOException { + if (writeable != null) { + writeBoolean(true); + writeable.writeTo(this); + } else { + writeBoolean(false); + } + } + public void writeThrowable(Throwable throwable) throws IOException { if (throwable == null) { writeBoolean(false); @@ -642,7 +663,7 @@ public abstract class StreamOutput extends OutputStream { /** * Writes a {@link NamedWriteable} to the current stream, by first writing its name and then the object itself */ - void writeNamedWriteable(NamedWriteable namedWriteable) throws IOException { + void writeNamedWriteable(NamedWriteable 
namedWriteable) throws IOException {
         writeString(namedWriteable.getWriteableName());
         namedWriteable.writeTo(this);
     }
@@ -664,10 +685,22 @@
     /**
      * Writes a {@link QueryBuilder} to the current stream
      */
-    public void writeQuery(QueryBuilder queryBuilder) throws IOException {
+    public void writeQuery(QueryBuilder queryBuilder) throws IOException {
         writeNamedWriteable(queryBuilder);
     }

+    /**
+     * Write an optional {@link QueryBuilder} to the stream.
+     */
+    public void writeOptionalQuery(@Nullable QueryBuilder queryBuilder) throws IOException {
+        if (queryBuilder == null) {
+            writeBoolean(false);
+        } else {
+            writeBoolean(true);
+            writeQuery(queryBuilder);
+        }
+    }
+
     /**
      * Writes a {@link ShapeBuilder} to the current stream
      */
@@ -682,6 +715,13 @@
         writeNamedWriteable(scoreFunctionBuilder);
     }

+    /**
+     * Writes the given {@link SmoothingModel} to the stream
+     */
+    public void writePhraseSuggestionSmoothingModel(SmoothingModel smoothingModel) throws IOException {
+        writeNamedWriteable(smoothingModel);
+    }
+
     /**
      * Writes a {@link Task.Status} to the current stream.
      */
@@ -713,4 +753,19 @@
     public void writeRescorer(RescoreBuilder rescorer) throws IOException {
         writeNamedWriteable(rescorer);
     }
+
+    /**
+     * Writes a {@link SuggestionBuilder} to the current stream
+     */
+    public void writeSuggestion(SuggestionBuilder suggestion) throws IOException {
+        writeNamedWriteable(suggestion);
+    }
+
+    /**
+     * Writes a {@link SortBuilder} to the current stream
+     */
+    public void writeSortBuilder(SortBuilder sort) throws IOException {
+        writeNamedWriteable(sort);
+    }
+
 }
diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamableReader.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamableReader.java
index 6bb1c5653f3..bd37f5ed47b 100644
--- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamableReader.java
+++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamableReader.java
@@ -23,10 +23,7 @@ import java.io.IOException;
 /**
  * Implementers can be read from {@linkplain StreamInput} by calling their {@link #readFrom(StreamInput)} method.
  *
- * It is common for implementers of this interface to declare a public static final instance of themselves named PROTOTYPE so
- * users can call {@linkplain #readFrom(StreamInput)} on it. It is also fairly typical for readFrom to be implemented as a method that just
- * calls a constructor that takes {@linkplain StreamInput} as a parameter. This allows the fields in the implementer to be
- * final.
+ * Implementers of this interface that also implement {@link Writeable} should see advice there on how to do so.
  */
 public interface StreamableReader {
     /**
diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java b/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java
index 9ff3de736c5..75c1f28c39c 100644
--- a/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java
+++ b/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java
@@ -31,10 +31,31 @@ import java.io.IOException;
 *
 * Prefer implementing this interface over implementing {@link Streamable} where possible. Lots of code depends on {@linkplain Streamable}
 * so this isn't always possible.
+ *
+ * The fact that this interface extends {@link StreamableReader} should be considered vestigial.
Instead of using its
+ * {@link #readFrom(StreamInput)} method you should prefer using the Reader interface as a reference to a constructor that takes
+ * {@link StreamInput}. The reasoning behind this is that most "good" readFrom implementations just delegated to such a constructor anyway
+ * and they required an unsightly PROTOTYPE object.
 */
-public interface Writeable<T> extends StreamableReader<T> {
+public interface Writeable<T> extends StreamableReader<T> { // TODO remove extends StreamableReader from this interface, and remove
    /**
     * Write this into the {@linkplain StreamOutput}.
     */
    void writeTo(StreamOutput out) throws IOException;
+
+    @Override
+    default T readFrom(StreamInput in) throws IOException {
+        // See class javadoc for reasoning
+        throw new UnsupportedOperationException("Prefer calling a constructor that takes a StreamInput to calling readFrom.");
+    }
+
+    /**
+     * Reference to a method that can read some object from a stream. By convention this is a constructor that takes
+     * {@linkplain StreamInput} as an argument for most classes and a static method for things like enums. Returning null from one of these
+     * is always wrong - for that we use methods like {@link StreamInput#readOptionalWriteable(Reader)}.
+     */
+    @FunctionalInterface
+    interface Reader<R> {
+        R read(StreamInput t) throws IOException;
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java b/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java
index 870b5f61466..b792a85d34c 100644
--- a/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java
+++ b/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java
@@ -19,6 +19,8 @@
 package org.elasticsearch.common.logging;

+import org.elasticsearch.common.SuppressLoggerChecks;
+
 /**
  * A logger that logs deprecation notices.
  */
@@ -45,6 +47,7 @@ public class DeprecationLogger {
     /**
      * Logs a deprecated message.
      */
+    @SuppressLoggerChecks(reason = "safely delegates to logger")
     public void deprecated(String msg, Object... params) {
         logger.debug(msg, params);
     }
diff --git a/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java b/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java
index 94ade9334d7..c0951c47df1 100644
--- a/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java
+++ b/core/src/main/java/org/elasticsearch/common/logging/ESLoggerFactory.java
@@ -21,6 +21,7 @@ package org.elasticsearch.common.logging;

 import org.apache.log4j.Logger;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;

 import java.util.Locale;

@@ -30,9 +31,10 @@ import java.util.Locale;
 public abstract class ESLoggerFactory {

     public static final Setting LOG_DEFAULT_LEVEL_SETTING =
-        new Setting<>("logger.level", LogLevel.INFO.name(), LogLevel::parse, false, Setting.Scope.CLUSTER);
+        new Setting<>("logger.level", LogLevel.INFO.name(), LogLevel::parse, Property.NodeScope);
     public static final Setting LOG_LEVEL_SETTING =
-        Setting.prefixKeySetting("logger.", LogLevel.INFO.name(), LogLevel::parse, true, Setting.Scope.CLUSTER);
+        Setting.prefixKeySetting("logger.", LogLevel.INFO.name(), LogLevel::parse,
+            Property.Dynamic, Property.NodeScope);

     public static ESLogger getLogger(String prefix, String name) {
         prefix = prefix == null ? null : prefix.intern();
diff --git a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java
index 28feca13c02..da628b09d2b 100644
--- a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java
+++ b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java
@@ -110,9 +110,7 @@ public class LogConfigurator {
         if (resolveConfig) {
             resolveConfig(environment, settingsBuilder);
         }
-        settingsBuilder
-                .putProperties("elasticsearch.", BootstrapInfo.getSystemProperties())
-                .putProperties("es.", BootstrapInfo.getSystemProperties());
+        settingsBuilder.putProperties("es.", BootstrapInfo.getSystemProperties());
         // add custom settings after config was added so that they are not overwritten by config
         settingsBuilder.put(settings);
         settingsBuilder.replacePropertyPlaceholders();
diff --git a/core/src/main/java/org/elasticsearch/common/logging/TerminalAppender.java b/core/src/main/java/org/elasticsearch/common/logging/TerminalAppender.java
index 7031a62a999..e967ad9d79e 100644
--- a/core/src/main/java/org/elasticsearch/common/logging/TerminalAppender.java
+++ b/core/src/main/java/org/elasticsearch/common/logging/TerminalAppender.java
@@ -22,7 +22,7 @@ package org.elasticsearch.common.logging;

 import org.apache.log4j.AppenderSkeleton;
 import org.apache.log4j.spi.LoggingEvent;
-import org.elasticsearch.common.cli.Terminal;
+import org.elasticsearch.cli.Terminal;

 /**
  * TerminalAppender logs events to Terminal.DEFAULT. It is used for example by the PluginCli.
diff --git a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java
index 39f34ad867e..8508a8a2e40 100644
--- a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java
+++ b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java
@@ -111,7 +111,7 @@ public class Lucene {
         try {
             return Version.parse(version);
         } catch (ParseException e) {
-            logger.warn("no version match {}, default to {}", version, defaultVersion, e);
+            logger.warn("no version match {}, default to {}", e, version, defaultVersion);
             return defaultVersion;
         }
     }
@@ -235,11 +235,7 @@ public class Lucene {
             @Override
             protected Object doBody(String segmentFileName) throws IOException {
                 try (IndexInput input = directory.openInput(segmentFileName, IOContext.READ)) {
-                    final int format = input.readInt();
-                    if (format == CodecUtil.CODEC_MAGIC) {
-                        CodecUtil.checksumEntireFile(input);
-                    }
-                    // legacy....
+                    CodecUtil.checksumEntireFile(input);
                 }
                 return null;
             }
diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java
index 754d76fed27..52de9a7e5db 100644
--- a/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java
+++ b/core/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java
@@ -134,7 +134,7 @@ public class MultiPhrasePrefixQuery extends Query {
         if (termArrays.isEmpty()) {
             return new MatchNoDocsQuery();
         }
-        MultiPhraseQuery query = new MultiPhraseQuery();
+        MultiPhraseQuery.Builder query = new MultiPhraseQuery.Builder();
         query.setSlop(slop);
         int sizeMinus1 = termArrays.size() - 1;
         for (int i = 0; i < sizeMinus1; i++) {
@@ -153,7 +153,7 @@ public class MultiPhrasePrefixQuery extends Query {
             return Queries.newMatchNoDocsQuery();
         }
         query.add(terms.toArray(Term.class), position);
-        return query.rewrite(reader);
+        return query.build();
     }

     private void getPrefixTerms(ObjectHashSet<Term> terms, final Term prefix, final IndexReader reader) throws IOException {
diff --git a/core/src/main/java/org/elasticsearch/common/network/IfConfig.java b/core/src/main/java/org/elasticsearch/common/network/IfConfig.java
index ea33274fad1..282f348c81b 100644
--- a/core/src/main/java/org/elasticsearch/common/network/IfConfig.java
+++ b/core/src/main/java/org/elasticsearch/common/network/IfConfig.java
@@ -31,14 +31,14 @@ import java.net.SocketException;
 import java.util.List;
 import java.util.Locale;

-/**
+/**
  * Simple class to log {@code ifconfig}-style output at DEBUG level.
  */
 final class IfConfig {

-    private static final ESLogger logger = Loggers.getLogger(IfConfig.class);
+    private static final ESLogger logger = Loggers.getLogger(IfConfig.class);
     private static final String INDENT = " ";
-
+
     /** log interface configuration at debug level, if it's enabled */
     static void logIfNecessary() {
         if (logger.isDebugEnabled()) {
@@ -49,7 +49,7 @@ final class IfConfig {
             }
         }
     }
-
+
     /** perform actual logging: might throw exception if things go wrong */
     private static void doLogging() throws IOException {
         StringBuilder msg = new StringBuilder();
@@ -59,14 +59,14 @@ final class IfConfig {
             // ordinary name
             msg.append(nic.getName());
             msg.append(System.lineSeparator());
-
+
             // display name (e.g.
on windows) if (!nic.getName().equals(nic.getDisplayName())) { msg.append(INDENT); msg.append(nic.getDisplayName()); msg.append(System.lineSeparator()); } - + // addresses: v4 first, then v6 List addresses = nic.getInterfaceAddresses(); for (InterfaceAddress address : addresses) { @@ -76,7 +76,7 @@ final class IfConfig { msg.append(System.lineSeparator()); } } - + for (InterfaceAddress address : addresses) { if (address.getAddress() instanceof Inet6Address) { msg.append(INDENT); @@ -84,7 +84,7 @@ final class IfConfig { msg.append(System.lineSeparator()); } } - + // hardware address byte hardware[] = nic.getHardwareAddress(); if (hardware != null) { @@ -98,19 +98,19 @@ final class IfConfig { } msg.append(System.lineSeparator()); } - + // attributes msg.append(INDENT); msg.append(formatFlags(nic)); msg.append(System.lineSeparator()); } - logger.debug("configuration:" + System.lineSeparator() + "{}", msg.toString()); + logger.debug("configuration:{}{}", System.lineSeparator(), msg); } - + /** format internet address: java's default doesn't include everything useful */ private static String formatAddress(InterfaceAddress interfaceAddress) throws IOException { StringBuilder sb = new StringBuilder(); - + InetAddress address = interfaceAddress.getAddress(); if (address instanceof Inet6Address) { sb.append("inet6 "); @@ -122,10 +122,10 @@ final class IfConfig { sb.append(NetworkAddress.formatAddress(address)); int netmask = 0xFFFFFFFF << (32 - interfaceAddress.getNetworkPrefixLength()); sb.append(" netmask:" + NetworkAddress.formatAddress(InetAddress.getByAddress(new byte[] { - (byte)(netmask >>> 24), - (byte)(netmask >>> 16 & 0xFF), - (byte)(netmask >>> 8 & 0xFF), - (byte)(netmask & 0xFF) + (byte)(netmask >>> 24), + (byte)(netmask >>> 16 & 0xFF), + (byte)(netmask >>> 8 & 0xFF), + (byte)(netmask & 0xFF) }))); InetAddress broadcast = interfaceAddress.getBroadcast(); if (broadcast != null) { @@ -141,7 +141,7 @@ final class IfConfig { } return sb.toString(); } - + /** format network interface flags */ private static String formatFlags(NetworkInterface nic) throws SocketException { StringBuilder flags = new StringBuilder(); diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java index b0598469d3a..712bdbe99ab 100644 --- a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java +++ b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java @@ -22,14 +22,16 @@ package org.elasticsearch.common.network; import java.util.Arrays; import java.util.List; +import org.elasticsearch.action.support.replication.ReplicationTask; import org.elasticsearch.client.transport.TransportClientNodesService; import org.elasticsearch.client.transport.support.TransportProxyClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.Setting.Scope; import org.elasticsearch.common.util.ExtensionPoint; import org.elasticsearch.http.HttpServer; import org.elasticsearch.http.HttpServerTransport; @@ -139,6 +141,7 @@ import org.elasticsearch.rest.action.template.RestPutSearchTemplateAction; import 
org.elasticsearch.rest.action.termvectors.RestMultiTermVectorsAction; import org.elasticsearch.rest.action.termvectors.RestTermVectorsAction; import org.elasticsearch.rest.action.update.RestUpdateAction; +import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.local.LocalTransport; @@ -155,10 +158,11 @@ public class NetworkModule extends AbstractModule { public static final String LOCAL_TRANSPORT = "local"; public static final String NETTY_TRANSPORT = "netty"; - public static final Setting HTTP_TYPE_SETTING = Setting.simpleString("http.type", false, Scope.CLUSTER); - public static final Setting HTTP_ENABLED = Setting.boolSetting("http.enabled", true, false, Scope.CLUSTER); - public static final Setting TRANSPORT_SERVICE_TYPE_SETTING = Setting.simpleString("transport.service.type", false, Scope.CLUSTER); - public static final Setting TRANSPORT_TYPE_SETTING = Setting.simpleString("transport.type", false, Scope.CLUSTER); + public static final Setting HTTP_TYPE_SETTING = Setting.simpleString("http.type", Property.NodeScope); + public static final Setting HTTP_ENABLED = Setting.boolSetting("http.enabled", true, Property.NodeScope); + public static final Setting TRANSPORT_SERVICE_TYPE_SETTING = + Setting.simpleString("transport.service.type", Property.NodeScope); + public static final Setting TRANSPORT_TYPE_SETTING = Setting.simpleString("transport.type", Property.NodeScope); @@ -325,6 +329,7 @@ public class NetworkModule extends AbstractModule { registerTransportService(NETTY_TRANSPORT, TransportService.class); registerTransport(LOCAL_TRANSPORT, LocalTransport.class); registerTransport(NETTY_TRANSPORT, NettyTransport.class); + registerTaskStatus(ReplicationTask.Status.NAME, ReplicationTask.Status::new); if (transportClient == false) { registerHttpTransport(NETTY_TRANSPORT, NettyHttpServerTransport.class); @@ -370,6 +375,10 @@ public class NetworkModule extends AbstractModule { } } + public void registerTaskStatus(String name, Writeable.Reader reader) { + namedWriteableRegistry.register(Task.Status.class, name, reader); + } + @Override protected void configure() { bind(NetworkService.class).toInstance(networkService); diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkService.java b/core/src/main/java/org/elasticsearch/common/network/NetworkService.java index 5e8dbc4dcad..ff1f3912cc5 100644 --- a/core/src/main/java/org/elasticsearch/common/network/NetworkService.java +++ b/core/src/main/java/org/elasticsearch/common/network/NetworkService.java @@ -22,6 +22,7 @@ package org.elasticsearch.common.network; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -34,6 +35,7 @@ import java.util.HashSet; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.TimeUnit; +import java.util.function.Function; /** * @@ -43,24 +45,33 @@ public class NetworkService extends AbstractComponent { /** By default, we bind to loopback interfaces */ public static final String DEFAULT_NETWORK_HOST = "_local_"; - public static final Setting> GLOBAL_NETWORK_HOST_SETTING = Setting.listSetting("network.host", 
Arrays.asList(DEFAULT_NETWORK_HOST), - s -> s, false, Setting.Scope.CLUSTER); - public static final Setting> GLOBAL_NETWORK_BINDHOST_SETTING = Setting.listSetting("network.bind_host", GLOBAL_NETWORK_HOST_SETTING, - s -> s, false, Setting.Scope.CLUSTER); - public static final Setting> GLOBAL_NETWORK_PUBLISHHOST_SETTING = Setting.listSetting("network.publish_host", GLOBAL_NETWORK_HOST_SETTING, - s -> s, false, Setting.Scope.CLUSTER); - public static final Setting NETWORK_SERVER = Setting.boolSetting("network.server", true, false, Setting.Scope.CLUSTER); + public static final Setting> GLOBAL_NETWORK_HOST_SETTING = + Setting.listSetting("network.host", Arrays.asList(DEFAULT_NETWORK_HOST), Function.identity(), Property.NodeScope); + public static final Setting> GLOBAL_NETWORK_BINDHOST_SETTING = + Setting.listSetting("network.bind_host", GLOBAL_NETWORK_HOST_SETTING, Function.identity(), Property.NodeScope); + public static final Setting> GLOBAL_NETWORK_PUBLISHHOST_SETTING = + Setting.listSetting("network.publish_host", GLOBAL_NETWORK_HOST_SETTING, Function.identity(), Property.NodeScope); + public static final Setting NETWORK_SERVER = Setting.boolSetting("network.server", true, Property.NodeScope); public static final class TcpSettings { - public static final Setting TCP_NO_DELAY = Setting.boolSetting("network.tcp.no_delay", true, false, Setting.Scope.CLUSTER); - public static final Setting TCP_KEEP_ALIVE = Setting.boolSetting("network.tcp.keep_alive", true, false, Setting.Scope.CLUSTER); - public static final Setting TCP_REUSE_ADDRESS = Setting.boolSetting("network.tcp.reuse_address", NetworkUtils.defaultReuseAddress(), false, Setting.Scope.CLUSTER); - public static final Setting TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("network.tcp.send_buffer_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER); - public static final Setting TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("network.tcp.receive_buffer_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER); - public static final Setting TCP_BLOCKING = Setting.boolSetting("network.tcp.blocking", false, false, Setting.Scope.CLUSTER); - public static final Setting TCP_BLOCKING_SERVER = Setting.boolSetting("network.tcp.blocking_server", TCP_BLOCKING, false, Setting.Scope.CLUSTER); - public static final Setting TCP_BLOCKING_CLIENT = Setting.boolSetting("network.tcp.blocking_client", TCP_BLOCKING, false, Setting.Scope.CLUSTER); - public static final Setting TCP_CONNECT_TIMEOUT = Setting.timeSetting("network.tcp.connect_timeout", new TimeValue(30, TimeUnit.SECONDS), false, Setting.Scope.CLUSTER); + public static final Setting TCP_NO_DELAY = + Setting.boolSetting("network.tcp.no_delay", true, Property.NodeScope); + public static final Setting TCP_KEEP_ALIVE = + Setting.boolSetting("network.tcp.keep_alive", true, Property.NodeScope); + public static final Setting TCP_REUSE_ADDRESS = + Setting.boolSetting("network.tcp.reuse_address", NetworkUtils.defaultReuseAddress(), Property.NodeScope); + public static final Setting TCP_SEND_BUFFER_SIZE = + Setting.byteSizeSetting("network.tcp.send_buffer_size", new ByteSizeValue(-1), Property.NodeScope); + public static final Setting TCP_RECEIVE_BUFFER_SIZE = + Setting.byteSizeSetting("network.tcp.receive_buffer_size", new ByteSizeValue(-1), Property.NodeScope); + public static final Setting TCP_BLOCKING = + Setting.boolSetting("network.tcp.blocking", false, Property.NodeScope); + public static final Setting TCP_BLOCKING_SERVER = + Setting.boolSetting("network.tcp.blocking_server", TCP_BLOCKING, 
Property.NodeScope);
+        public static final Setting TCP_BLOCKING_CLIENT =
+            Setting.boolSetting("network.tcp.blocking_client", TCP_BLOCKING, Property.NodeScope);
+        public static final Setting TCP_CONNECT_TIMEOUT =
+            Setting.timeSetting("network.tcp.connect_timeout", new TimeValue(30, TimeUnit.SECONDS), Property.NodeScope);
     }

     /**
diff --git a/core/src/main/java/org/elasticsearch/common/property/PropertyPlaceholder.java b/core/src/main/java/org/elasticsearch/common/property/PropertyPlaceholder.java
index a210f3ae6d5..70e6807cb92 100644
--- a/core/src/main/java/org/elasticsearch/common/property/PropertyPlaceholder.java
+++ b/core/src/main/java/org/elasticsearch/common/property/PropertyPlaceholder.java
@@ -76,8 +76,9 @@
      * @param placeholderResolver the PlaceholderResolver to use for replacement.
      * @return the supplied value with placeholders replaced inline.
      */
-    public String replacePlaceholders(String value, PlaceholderResolver placeholderResolver) {
-        Objects.requireNonNull(value, "Argument 'value' must not be null.");
+    public String replacePlaceholders(String key, String value, PlaceholderResolver placeholderResolver) {
+        Objects.requireNonNull(key);
+        Objects.requireNonNull(value, "value can not be null for [" + key + "]");
         return parseStringValue(value, placeholderResolver, new HashSet());
     }
diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
index 453fc3f9a36..358706c9d3f 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
@@ -19,7 +19,11 @@
 package org.elasticsearch.common.settings;

+import org.apache.lucene.search.spell.LevensteinDistance;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.regex.Regex;

@@ -30,37 +34,41 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.SortedMap;
+import java.util.SortedSet;
+import java.util.TreeMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.function.BiConsumer;
 import java.util.function.Consumer;
 import java.util.regex.Pattern;
+import java.util.stream.Collectors;

 /**
  * A basic setting service that can be used for per-index and per-cluster settings.
  * This service offers transactional application of settings updates.
*/ public abstract class AbstractScopedSettings extends AbstractComponent { + public static final String ARCHIVED_SETTINGS_PREFIX = "archived."; private Settings lastSettingsApplied = Settings.EMPTY; private final List> settingUpdaters = new CopyOnWriteArrayList<>(); private final Map> complexMatchers; private final Map> keySettings; - private final Setting.Scope scope; + private final Setting.Property scope; private static final Pattern KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])*[-\\w]+$"); private static final Pattern GROUP_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+$"); - protected AbstractScopedSettings(Settings settings, Set> settingsSet, Setting.Scope scope) { + protected AbstractScopedSettings(Settings settings, Set> settingsSet, Setting.Property scope) { super(settings); this.lastSettingsApplied = Settings.EMPTY; this.scope = scope; Map> complexMatchers = new HashMap<>(); Map> keySettings = new HashMap<>(); for (Setting setting : settingsSet) { - if (setting.getScope() != scope) { - throw new IllegalArgumentException("Setting must be a " + scope + " setting but was: " + setting.getScope()); - } - if (isValidKey(setting.getKey()) == false && (setting.isGroupSetting() && isValidGroupKey(setting.getKey())) == false) { - throw new IllegalArgumentException("illegal settings key: [" + setting.getKey() + "]"); + if (setting.getProperties().contains(scope) == false) { + throw new IllegalArgumentException("Setting must be a " + scope + " setting but has: " + setting.getProperties()); } + validateSettingKey(setting); + if (setting.hasComplexMatcher()) { Setting overlappingSetting = findOverlappingSetting(setting, complexMatchers); if (overlappingSetting != null) { @@ -76,6 +84,12 @@ public abstract class AbstractScopedSettings extends AbstractComponent { this.keySettings = Collections.unmodifiableMap(keySettings); } + protected void validateSettingKey(Setting setting) { + if (isValidKey(setting.getKey()) == false && (setting.isGroupSetting() && isValidGroupKey(setting.getKey())) == false) { + throw new IllegalArgumentException("illegal settings key: [" + setting.getKey() + "]"); + } + } + protected AbstractScopedSettings(Settings nodeSettings, Settings scopeSettings, AbstractScopedSettings other) { super(nodeSettings); this.lastSettingsApplied = scopeSettings; @@ -96,7 +110,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent { return GROUP_KEY_PATTERN.matcher(key).matches(); } - public Setting.Scope getScope() { + public Setting.Property getScope() { return this.scope; } @@ -216,9 +230,17 @@ public abstract class AbstractScopedSettings extends AbstractComponent { * * Validates that all given settings are registered and valid */ public final void validate(Settings settings) { - for (Map.Entry entry : settings.getAsMap().entrySet()) { - validate(entry.getKey(), settings); + List exceptions = new ArrayList<>(); + // we want them sorted for deterministic error messages + SortedMap sortedSettings = new TreeMap<>(settings.getAsMap()); + for (Map.Entry entry : sortedSettings.entrySet()) { + try { + validate(entry.getKey(), settings); + } catch (RuntimeException ex) { + exceptions.add(ex); + } } + ExceptionsHelper.rethrowAndSuppress(exceptions); } @@ -228,7 +250,21 @@ public abstract class AbstractScopedSettings extends AbstractComponent { public final void validate(String key, Settings settings) { Setting setting = get(key); if (setting == null) { - throw new IllegalArgumentException("unknown setting [" + key + "]"); + LevensteinDistance ld = new 
LevensteinDistance();
+            List<Tuple<Float, String>> scoredKeys = new ArrayList<>();
+            for (String k : this.keySettings.keySet()) {
+                float distance = ld.getDistance(key, k);
+                if (distance > 0.7f) {
+                    scoredKeys.add(new Tuple<>(distance, k));
+                }
+            }
+            CollectionUtil.timSort(scoredKeys, (a,b) -> b.v1().compareTo(a.v1()));
+            String msg = "unknown setting [" + key + "]";
+            List<String> keys = scoredKeys.stream().map((a) -> a.v2()).collect(Collectors.toList());
+            if (keys.isEmpty() == false) {
+                msg += " did you mean " + (keys.size() == 1 ? "[" + keys.get(0) + "]": "any of " + keys.toString()) + "?";
+            }
+            throw new IllegalArgumentException(msg);
         }
         setting.get(settings);
     }
@@ -342,8 +378,9 @@
      * Returns the value for the given setting.
      */
     public T get(Setting setting) {
-        if (setting.getScope() != scope) {
-            throw new IllegalArgumentException("settings scope doesn't match the setting scope [" + this.scope + "] != [" + setting.getScope() + "]");
+        if (setting.getProperties().contains(scope) == false) {
+            throw new IllegalArgumentException("settings scope doesn't match the setting scope [" + this.scope + "] not in [" +
+                setting.getProperties() + "]");
         }
         if (get(setting.getKey()) == null) {
             throw new IllegalArgumentException("setting " + setting.getKey() + " has not been registered");
@@ -442,4 +479,53 @@
         }
         return null;
     }
+
+    /**
+     * Archives broken or unknown settings. Any setting that is not recognized or fails
+     * validation will be archived. This means the setting is prefixed with {@value ARCHIVED_SETTINGS_PREFIX}
+     * and remains in the settings object. This can be used to detect broken settings via APIs.
+     */
+    public Settings archiveUnknownOrBrokenSettings(Settings settings) {
+        Settings.Builder builder = Settings.builder();
+        boolean changed = false;
+        for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
+            try {
+                Setting setting = get(entry.getKey());
+                if (setting != null) {
+                    setting.get(settings);
+                    builder.put(entry.getKey(), entry.getValue());
+                } else {
+                    if (entry.getKey().startsWith(ARCHIVED_SETTINGS_PREFIX) || isPrivateSetting(entry.getKey())) {
+                        builder.put(entry.getKey(), entry.getValue());
+                    } else {
+                        changed = true;
+                        logger.warn("found unknown setting: {} value: {} - archiving", entry.getKey(), entry.getValue());
+                        // we put them back in here such that tools can check from the outside if there are any indices with broken settings. The setting can remain there
+                        // but we want users to be aware that some of their settings are broken and they can research why and what they need to do to replace them.
+                        builder.put(ARCHIVED_SETTINGS_PREFIX + entry.getKey(), entry.getValue());
+                    }
+                }
+            } catch (IllegalArgumentException ex) {
+                changed = true;
+                logger.warn("found invalid setting: {} value: {} - archiving", ex, entry.getKey(), entry.getValue());
+                // we put them back in here such that tools can check from the outside if there are any indices with broken settings. The setting can remain there
+                // but we want users to be aware that some of their settings are broken and they can research why and what they need to do to replace them.
+                builder.put(ARCHIVED_SETTINGS_PREFIX + entry.getKey(), entry.getValue());
+            }
+        }
+        if (changed) {
+            return builder.build();
+        } else {
+            return settings;
+        }
+    }
+
+    /**
+     * Returns true iff the setting is a private setting, i.e. it should be treated as valid even though it has no internal
+     * representation.
Otherwise false + */ + // TODO this should be replaced by Setting.Property.HIDDEN or something like this. + protected boolean isPrivateSetting(String key) { + return false; + } } diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index fa8b8c4ac41..5f587cc270d 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -29,8 +29,10 @@ import org.elasticsearch.client.transport.TransportClientNodesService; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.InternalClusterInfoService; +import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; @@ -41,10 +43,11 @@ import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDeci import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; -import org.elasticsearch.cluster.service.InternalClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.discovery.DiscoveryModule; @@ -101,7 +104,7 @@ import java.util.function.Predicate; */ public final class ClusterSettings extends AbstractScopedSettings { public ClusterSettings(Settings nodeSettings, Set> settingsSet) { - super(nodeSettings, settingsSet, Setting.Scope.CLUSTER); + super(nodeSettings, settingsSet, Property.NodeScope); addSettingsUpdater(new LoggingSettingUpdater(nodeSettings)); } @@ -252,14 +255,14 @@ public final class ClusterSettings extends AbstractScopedSettings { HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, - InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, + ClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING, ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, TransportService.TRACE_LOG_EXCLUDE_SETTING, TransportService.TRACE_LOG_INCLUDE_SETTING, TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, - InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING, + NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING, 
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, Transport.TRANSPORT_TCP_COMPRESS, @@ -326,7 +329,7 @@ public final class ClusterSettings extends AbstractScopedSettings { Environment.PATH_SCRIPTS_SETTING, Environment.PATH_SHARED_DATA_SETTING, Environment.PIDFILE_SETTING, - InternalClusterService.NODE_ID_SEED_SETTING, + DiscoveryNodeService.NODE_ID_SEED_SETTING, DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING, DiscoveryModule.DISCOVERY_TYPE_SETTING, DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING, @@ -341,9 +344,8 @@ public final class ClusterSettings extends AbstractScopedSettings { ZenDiscovery.JOIN_RETRY_DELAY_SETTING, ZenDiscovery.MAX_PINGS_FROM_ANOTHER_MASTER_SETTING, ZenDiscovery.SEND_LEAVE_REQUEST_SETTING, - ZenDiscovery.MASTER_ELECTION_FILTER_CLIENT_SETTING, ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING, - ZenDiscovery.MASTER_ELECTION_FILTER_DATA_SETTING, + ZenDiscovery.MASTER_ELECTION_IGNORE_NON_MASTER_PINGS_SETTING, UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING, UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING, SearchService.DEFAULT_KEEPALIVE_SETTING, diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 69ef795812d..fb498283d7b 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -22,6 +22,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.gateway.PrimaryShardAllocator; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; @@ -34,7 +35,8 @@ import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.percolator.PercolatorQueriesRegistry; +import org.elasticsearch.index.percolator.PercolatorQueryCache; +import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.FsDirectoryService; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.store.Store; @@ -44,12 +46,13 @@ import org.elasticsearch.indices.IndicesRequestCache; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.Map; import java.util.Set; import java.util.function.Predicate; /** * Encapsulates all valid index level settings. 
- * @see org.elasticsearch.common.settings.Setting.Scope#INDEX + * @see Property#IndexScope */ public final class IndexScopedSettings extends AbstractScopedSettings { @@ -122,7 +125,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { FieldMapper.IGNORE_MALFORMED_SETTING, FieldMapper.COERCE_SETTING, Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING, - PercolatorQueriesRegistry.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING, + PercolatorQueryCache.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING, MapperService.INDEX_MAPPER_DYNAMIC_SETTING, MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING, BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING, @@ -132,17 +135,23 @@ public final class IndexScopedSettings extends AbstractScopedSettings { PrimaryShardAllocator.INDEX_RECOVERY_INITIAL_SHARDS_SETTING, FsDirectoryService.INDEX_LOCK_FACTOR_SETTING, EngineConfig.INDEX_CODEC_SETTING, - IndexWarmer.INDEX_NORMS_LOADING_SETTING, - // this sucks but we can't really validate all the analyzers/similarity in here - Setting.groupSetting("index.similarity.", false, Setting.Scope.INDEX), // this allows similarity settings to be passed - Setting.groupSetting("index.analysis.", false, Setting.Scope.INDEX) // this allows analysis settings to be passed + // validate that built-in similarities don't get redefined + Setting.groupSetting("index.similarity.", (s) -> { + Map groups = s.getAsGroups(); + for (String key : SimilarityService.BUILT_IN.keySet()) { + if (groups.containsKey(key)) { + throw new IllegalArgumentException("illegal value for [index.similarity."+ key + "] cannot redefine built-in similarity"); + } + } + }, Property.IndexScope), // this allows similarity settings to be passed + Setting.groupSetting("index.analysis.", Property.IndexScope) // this allows analysis settings to be passed ))); public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS); public IndexScopedSettings(Settings settings, Set> settingsSet) { - super(settings, settingsSet, Setting.Scope.INDEX); + super(settings, settingsSet, Property.IndexScope); } private IndexScopedSettings(Settings settings, IndexScopedSettings other, IndexMetaData metaData) { @@ -153,7 +162,16 @@ public final class IndexScopedSettings extends AbstractScopedSettings { return new IndexScopedSettings(settings, this, metaData); } - public boolean isPrivateSetting(String key) { + @Override + protected void validateSettingKey(Setting setting) { + if (setting.getKey().startsWith("index.") == false) { + throw new IllegalArgumentException("illegal settings key: [" + setting.getKey() + "] must start with [index.]"); + } + super.validateSettingKey(setting); + } + + @Override + protected final boolean isPrivateSetting(String key) { switch (key) { case IndexMetaData.SETTING_CREATION_DATE: case IndexMetaData.SETTING_INDEX_UUID: diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index 0b4e43744a5..a2ebe7a2c30 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -20,37 +20,47 @@ package org.elasticsearch.common.settings; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.support.ToXContentToBytes; import 
org.elasticsearch.common.Booleans;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.MemorySizeValue;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;

 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.Enumeration;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.function.BiConsumer;
 import java.util.function.Consumer;
 import java.util.function.Function;
+import java.util.function.Predicate;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;

 /**
  * A setting. Encapsulates typical stuff like default value, parsing, and scope.
- * Some (dynamic=true) can by modified at run time using the API.
+ * Some (Property.Dynamic) can be modified at run time using the API.
  * All settings inside elasticsearch or in any of the plugins should use this type-safe and generic settings infrastructure
  * together with {@link AbstractScopedSettings}. This class contains several utility methods that makes it straightforward
  * to add settings for the majority of the cases. For instance a simple boolean setting can be defined like this:
 * <pre>{@code
- * public static final Setting<Boolean> MY_BOOLEAN = Setting.boolSetting("my.bool.setting", true, false, Scope.CLUSTER);}
+ * public static final Setting<Boolean> MY_BOOLEAN = Setting.boolSetting("my.bool.setting", true, Property.NodeScope);}
 * </pre>
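To make the new declaration style concrete, here is a minimal sketch of declaring and then reading such a boolean setting; the holder class and the setting key are illustrative assumptions, not part of this change:

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Setting.Property;
    import org.elasticsearch.common.settings.Settings;

    // hypothetical holder class, only for illustration
    public final class MyPluginSettings {
        // Property varargs replace the old (boolean dynamic, Scope scope) argument pair
        public static final Setting<Boolean> EAGER =
                Setting.boolSetting("my.plugin.eager", true, Property.Dynamic, Property.NodeScope);

        public static boolean isEager(Settings settings) {
            // parses the raw string value, falling back to the declared default
            return EAGER.get(settings);
        }
    }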
 * To retrieve the value of the setting a {@link Settings} object can be passed directly to the {@link Setting#get(Settings)} method.
 * <pre>{@code
@@ -61,32 +71,81 @@ import java.util.stream.Collectors;
  * public enum Color {
  *     RED, GREEN, BLUE;
  * }
- * public static final Setting<Color> MY_BOOLEAN = new Setting<>("my.color.setting", Color.RED.toString(), Color::valueOf, false, Scope.CLUSTER);
+ * public static final Setting<Color> MY_COLOR =
+ *     new Setting<>("my.color.setting", Color.RED.toString(), Color::valueOf, Property.NodeScope);
  * }
 * </pre>
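The enum example above composes the same way; a self-contained sketch (the setting key is reused from the javadoc, everything else is an assumption) showing that the Property flags passed at construction are queryable on the resulting setting:

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Setting.Property;
    import org.elasticsearch.common.settings.Settings;

    // hypothetical class, only for illustration
    public final class ColorSettingExample {
        public enum Color { RED, GREEN, BLUE }

        // parsed via Color::valueOf, so stored values must match an enum constant
        public static final Setting<Color> MY_COLOR = new Setting<>(
                "my.color.setting", Color.RED.toString(), Color::valueOf, Property.Dynamic, Property.NodeScope);

        public static void demo() {
            Settings settings = Settings.builder().put("my.color.setting", "GREEN").build();
            assert MY_COLOR.get(settings) == Color.GREEN;
            assert MY_COLOR.isDynamic();    // Property.Dynamic was passed
            assert MY_COLOR.hasNodeScope(); // Property.NodeScope was passed
        }
    }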
*/ public class Setting extends ToXContentToBytes { + + public enum Property { + /** + * should be filtered in some api (mask password/credentials) + */ + Filtered, + + /** + * iff this setting can be dynamically updateable + */ + Dynamic, + + /** + * mark this setting as deprecated + */ + Deprecated, + + /** + * Node scope + */ + NodeScope, + + /** + * Index scope + */ + IndexScope + } + + private static final ESLogger logger = Loggers.getLogger(Setting.class); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); + private final Key key; protected final Function defaultValue; private final Function parser; - private final boolean dynamic; - private final Scope scope; + private final EnumSet properties; + + private static final EnumSet EMPTY_PROPERTIES = EnumSet.noneOf(Property.class); /** - * Creates a new Setting instance + * Creates a new Setting instance. When no scope is provided, we default to {@link Property#NodeScope}. * @param key the settings key for this setting. * @param defaultValue a default value function that returns the default values string representation. * @param parser a parser that parses the string rep into a complex datatype. - * @param dynamic true iff this setting can be dynamically updateable - * @param scope the scope of this setting + * @param properties properties for this setting like scope, filtering... */ - public Setting(Key key, Function defaultValue, Function parser, boolean dynamic, Scope scope) { + public Setting(Key key, Function defaultValue, Function parser, Property... properties) { assert parser.apply(defaultValue.apply(Settings.EMPTY)) != null || this.isGroupSetting(): "parser returned null"; this.key = key; this.defaultValue = defaultValue; this.parser = parser; - this.dynamic = dynamic; - this.scope = scope; + if (properties == null) { + throw new IllegalArgumentException("properties can not be null for setting [" + key + "]"); + } + if (properties.length == 0) { + this.properties = EMPTY_PROPERTIES; + } else { + this.properties = EnumSet.copyOf(Arrays.asList(properties)); + } + } + + /** + * Creates a new Setting instance + * @param key the settings key for this setting. + * @param defaultValue a default value. + * @param parser a parser that parses the string rep into a complex datatype. + * @param properties properties for this setting like scope, filtering... + */ + public Setting(String key, String defaultValue, Function parser, Property... properties) { + this(key, s -> defaultValue, parser, properties); } /** @@ -94,11 +153,10 @@ public class Setting extends ToXContentToBytes { * @param key the settings key for this setting. * @param defaultValue a default value function that returns the default values string representation. * @param parser a parser that parses the string rep into a complex datatype. - * @param dynamic true iff this setting can be dynamically updateable - * @param scope the scope of this setting + * @param properties properties for this setting like scope, filtering... */ - public Setting(String key, Function defaultValue, Function parser, boolean dynamic, Scope scope) { - this(new SimpleKey(key), defaultValue, parser, dynamic, scope); + public Setting(String key, Function defaultValue, Function parser, Property... properties) { + this(new SimpleKey(key), defaultValue, parser, properties); } /** @@ -106,11 +164,10 @@ public class Setting extends ToXContentToBytes { * @param key the settings key for this setting. 
* @param fallBackSetting a setting to fall back to if the current setting is not set. * @param parser a parser that parses the string rep into a complex datatype. - * @param dynamic true iff this setting can be dynamically updateable - * @param scope the scope of this setting + * @param properties properties for this setting like scope, filtering... */ - public Setting(String key, Setting fallBackSetting, Function parser, boolean dynamic, Scope scope) { - this(key, fallBackSetting::getRaw, parser, dynamic, scope); + public Setting(String key, Setting fallBackSetting, Function parser, Property... properties) { + this(key, fallBackSetting::getRaw, parser, properties); } /** @@ -132,17 +189,46 @@ public class Setting extends ToXContentToBytes { } /** - * Returns true iff this setting is dynamically updateable, otherwise false + * Returns true if this setting is dynamically updateable, otherwise false */ public final boolean isDynamic() { - return dynamic; + return properties.contains(Property.Dynamic); } /** - * Returns the settings scope + * Returns the setting properties + * @see Property */ - public final Scope getScope() { - return scope; + public EnumSet getProperties() { + return properties; + } + + /** + * Returns true if this setting must be filtered, otherwise false + */ + public boolean isFiltered() { + return properties.contains(Property.Filtered); + } + + /** + * Returns true if this setting has a node scope, otherwise false + */ + public boolean hasNodeScope() { + return properties.contains(Property.NodeScope); + } + + /** + * Returns true if this setting has an index scope, otherwise false + */ + public boolean hasIndexScope() { + return properties.contains(Property.IndexScope); + } + + /** + * Returns true if this setting is deprecated, otherwise false + */ + public boolean isDeprecated() { + return properties.contains(Property.Deprecated); } /** @@ -177,7 +263,7 @@ public class Setting extends ToXContentToBytes { /** * Returns true iff this setting is present in the given settings object. Otherwise false */ - public final boolean exists(Settings settings) { + public boolean exists(Settings settings) { return settings.get(getKey()) != null; } @@ -205,6 +291,12 @@ public class Setting extends ToXContentToBytes { * instead. This is useful if the value can't be parsed due to an invalid value to access the actual value. */ public String getRaw(Settings settings) { + // They're using the setting, so we need to tell them to stop + if (this.isDeprecated() && this.exists(settings)) { + // It would be convenient to show its replacement key, but replacement is often not so simple + deprecationLogger.deprecated("[{}] setting was deprecated in Elasticsearch and it will be removed in a future release! 
" + + "See the breaking changes lists in the documentation for details", getKey()); + } return settings.get(getKey(), defaultValue.apply(settings)); } @@ -221,8 +313,7 @@ public class Setting extends ToXContentToBytes { public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field("key", key.toString()); - builder.field("type", scope.name()); - builder.field("dynamic", dynamic); + builder.field("properties", properties); builder.field("is_group_setting", isGroupSetting()); builder.field("default", defaultValue.apply(Settings.EMPTY)); builder.endObject(); @@ -244,14 +335,6 @@ public class Setting extends ToXContentToBytes { return this; } - /** - * The settings scope - settings can either be cluster settings or per index settings. - */ - public enum Scope { - CLUSTER, - INDEX; - } - /** * Build a new updater with a noop validator. */ @@ -349,38 +432,34 @@ public class Setting extends ToXContentToBytes { } - public Setting(String key, String defaultValue, Function parser, boolean dynamic, Scope scope) { - this(key, (s) -> defaultValue, parser, dynamic, scope); + public static Setting floatSetting(String key, float defaultValue, Property... properties) { + return new Setting<>(key, (s) -> Float.toString(defaultValue), Float::parseFloat, properties); } - public static Setting floatSetting(String key, float defaultValue, boolean dynamic, Scope scope) { - return new Setting<>(key, (s) -> Float.toString(defaultValue), Float::parseFloat, dynamic, scope); - } - - public static Setting floatSetting(String key, float defaultValue, float minValue, boolean dynamic, Scope scope) { + public static Setting floatSetting(String key, float defaultValue, float minValue, Property... properties) { return new Setting<>(key, (s) -> Float.toString(defaultValue), (s) -> { float value = Float.parseFloat(s); if (value < minValue) { throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); } return value; - }, dynamic, scope); + }, properties); } - public static Setting intSetting(String key, int defaultValue, int minValue, int maxValue, boolean dynamic, Scope scope) { - return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, maxValue, key), dynamic, scope); + public static Setting intSetting(String key, int defaultValue, int minValue, int maxValue, Property... properties) { + return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, maxValue, key), properties); } - public static Setting intSetting(String key, int defaultValue, int minValue, boolean dynamic, Scope scope) { - return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, key), dynamic, scope); + public static Setting intSetting(String key, int defaultValue, int minValue, Property... properties) { + return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, key), properties); } - public static Setting longSetting(String key, long defaultValue, long minValue, boolean dynamic, Scope scope) { - return new Setting<>(key, (s) -> Long.toString(defaultValue), (s) -> parseLong(s, minValue, key), dynamic, scope); + public static Setting longSetting(String key, long defaultValue, long minValue, Property... 
properties) { + return new Setting<>(key, (s) -> Long.toString(defaultValue), (s) -> parseLong(s, minValue, key), properties); } - public static Setting simpleString(String key, boolean dynamic, Scope scope) { - return new Setting<>(key, "", Function.identity(), dynamic, scope); + public static Setting simpleString(String key, Property... properties) { + return new Setting<>(key, s -> "", Function.identity(), properties); } public static int parseInt(String s, int minValue, String key) { @@ -414,51 +493,80 @@ public class Setting extends ToXContentToBytes { return timeValue; } - public static Setting intSetting(String key, int defaultValue, boolean dynamic, Scope scope) { - return intSetting(key, defaultValue, Integer.MIN_VALUE, dynamic, scope); + public static Setting intSetting(String key, int defaultValue, Property... properties) { + return intSetting(key, defaultValue, Integer.MIN_VALUE, properties); } - public static Setting boolSetting(String key, boolean defaultValue, boolean dynamic, Scope scope) { - return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, dynamic, scope); + public static Setting boolSetting(String key, boolean defaultValue, Property... properties) { + return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, properties); } - public static Setting boolSetting(String key, Setting fallbackSetting, boolean dynamic, Scope scope) { - return new Setting<>(key, fallbackSetting, Booleans::parseBooleanExact, dynamic, scope); + public static Setting boolSetting(String key, Setting fallbackSetting, Property... properties) { + return new Setting<>(key, fallbackSetting, Booleans::parseBooleanExact, properties); } - public static Setting byteSizeSetting(String key, String percentage, boolean dynamic, Scope scope) { - return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), dynamic, scope); + public static Setting byteSizeSetting(String key, String percentage, Property... properties) { + return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), properties); } - public static Setting byteSizeSetting(String key, ByteSizeValue value, boolean dynamic, Scope scope) { - return byteSizeSetting(key, (s) -> value.toString(), dynamic, scope); + public static Setting byteSizeSetting(String key, ByteSizeValue value, Property... properties) { + return byteSizeSetting(key, (s) -> value.toString(), properties); } - public static Setting byteSizeSetting(String key, Setting fallbackSettings, boolean dynamic, Scope scope) { - return byteSizeSetting(key, fallbackSettings::getRaw, dynamic, scope); + public static Setting byteSizeSetting(String key, Setting fallbackSettings, + Property... properties) { + return byteSizeSetting(key, fallbackSettings::getRaw, properties); } - public static Setting byteSizeSetting(String key, Function defaultValue, boolean dynamic, Scope scope) { - return new Setting<>(key, defaultValue, (s) -> ByteSizeValue.parseBytesSizeValue(s, key), dynamic, scope); + public static Setting byteSizeSetting(String key, Function defaultValue, + Property... 
properties) {
+        return new Setting<>(key, defaultValue, (s) -> ByteSizeValue.parseBytesSizeValue(s, key), properties);
     }
 
-    public static Setting<TimeValue> positiveTimeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) {
-        return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), dynamic, scope);
+    public static Setting<ByteSizeValue> byteSizeSetting(String key, ByteSizeValue value, ByteSizeValue minValue, ByteSizeValue maxValue,
+                                                         Property... properties) {
+        return byteSizeSetting(key, (s) -> value.toString(), minValue, maxValue, properties);
     }
 
-    public static <T> Setting<List<T>> listSetting(String key, List<String> defaultStringValue, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
-        return listSetting(key, (s) -> defaultStringValue, singleValueParser, dynamic, scope);
+    public static Setting<ByteSizeValue> byteSizeSetting(String key, Function<Settings, String> defaultValue,
+                                                         ByteSizeValue minValue, ByteSizeValue maxValue,
+                                                         Property... properties) {
+        return new Setting<>(key, defaultValue, (s) -> parseByteSize(s, minValue, maxValue, key), properties);
     }
 
-    public static <T> Setting<List<T>> listSetting(String key, Setting<List<T>> fallbackSetting, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
-        return listSetting(key, (s) -> parseableStringToList(fallbackSetting.getRaw(s)), singleValueParser, dynamic, scope);
+    public static ByteSizeValue parseByteSize(String s, ByteSizeValue minValue, ByteSizeValue maxValue, String key) {
+        ByteSizeValue value = ByteSizeValue.parseBytesSizeValue(s, key);
+        if (value.bytes() < minValue.bytes()) {
+            throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
+        }
+        if (value.bytes() > maxValue.bytes()) {
+            throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be <= " + maxValue);
+        }
+        return value;
+    }
 
-    public static <T> Setting<List<T>> listSetting(String key, Function<Settings, List<String>> defaultStringValue, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
+    public static Setting<TimeValue> positiveTimeSetting(String key, TimeValue defaultValue, Property... properties) {
+        return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), properties);
+    }
+
+    public static <T> Setting<List<T>> listSetting(String key, List<String> defaultStringValue, Function<String, T> singleValueParser,
+                                                   Property... properties) {
+        return listSetting(key, (s) -> defaultStringValue, singleValueParser, properties);
+    }
+
+    public static <T> Setting<List<T>> listSetting(String key, Setting<List<T>> fallbackSetting, Function<String, T> singleValueParser,
+                                                   Property... properties) {
+        return listSetting(key, (s) -> parseableStringToList(fallbackSetting.getRaw(s)), singleValueParser, properties);
+    }
+
+    public static <T> Setting<List<T>> listSetting(String key, Function<Settings, List<String>> defaultStringValue,
+                                                   Function<String, T> singleValueParser, Property... 
properties) { Function> parser = (s) -> parseableStringToList(s).stream().map(singleValueParser).collect(Collectors.toList()); - return new Setting>(new ListKey(key), (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, dynamic, scope) { + return new Setting>(new ListKey(key), + (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, properties) { + private final Pattern pattern = Pattern.compile(Pattern.quote(key)+"(\\.\\d+)?"); @Override public String getRaw(Settings settings) { String[] array = settings.getAsArray(getKey(), null); @@ -505,17 +613,45 @@ public class Setting extends ToXContentToBytes { throw new ElasticsearchException(ex); } } - - public static Setting groupSetting(String key, boolean dynamic, Scope scope) { - return new Setting(new GroupKey(key), (s) -> "", (s) -> null, dynamic, scope) { + public static Setting groupSetting(String key, Property... properties) { + return groupSetting(key, (s) -> {}, properties); + } + public static Setting groupSetting(String key, Consumer validator, Property... properties) { + return new Setting(new GroupKey(key), (s) -> "", (s) -> null, properties) { @Override public boolean isGroupSetting() { return true; } + @Override + public String getRaw(Settings settings) { + Settings subSettings = get(settings); + try { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + subSettings.toXContent(builder, EMPTY_PARAMS); + builder.endObject(); + return builder.string(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + @Override public Settings get(Settings settings) { - return settings.getByPrefix(getKey()); + Settings byPrefix = settings.getByPrefix(getKey()); + validator.accept(byPrefix); + return byPrefix; + } + + @Override + public boolean exists(Settings settings) { + for (Map.Entry entry : settings.getAsMap().entrySet()) { + if (entry.getKey().startsWith(key)) { + return true; + } + } + return false; } @Override @@ -560,30 +696,37 @@ public class Setting extends ToXContentToBytes { }; } - public static Setting timeSetting(String key, Function defaultValue, TimeValue minValue, boolean dynamic, Scope scope) { - return new Setting<>(key, defaultValue, (s) -> parseTimeValue(s, minValue, key), dynamic, scope); + public static Setting timeSetting(String key, Function defaultValue, TimeValue minValue, + Property... properties) { + return new Setting<>(key, defaultValue, (s) -> { + TimeValue timeValue = TimeValue.parseTimeValue(s, null, key); + if (timeValue.millis() < minValue.millis()) { + throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); + } + return timeValue; + }, properties); } - public static Setting timeSetting(String key, TimeValue defaultValue, TimeValue minValue, boolean dynamic, Scope scope) { - return timeSetting(key, (s) -> defaultValue.getStringRep(), minValue, dynamic, scope); + public static Setting timeSetting(String key, TimeValue defaultValue, TimeValue minValue, Property... properties) { + return timeSetting(key, (s) -> defaultValue.getStringRep(), minValue, properties); } - public static Setting timeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) { - return new Setting<>(key, (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, key), dynamic, scope); + public static Setting timeSetting(String key, TimeValue defaultValue, Property... 
properties) { + return new Setting<>(key, (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, key), properties); } - public static Setting timeSetting(String key, Setting fallbackSetting, boolean dynamic, Scope scope) { - return new Setting<>(key, fallbackSetting::getRaw, (s) -> TimeValue.parseTimeValue(s, key), dynamic, scope); + public static Setting timeSetting(String key, Setting fallbackSetting, Property... properties) { + return new Setting<>(key, fallbackSetting::getRaw, (s) -> TimeValue.parseTimeValue(s, key), properties); } - public static Setting doubleSetting(String key, double defaultValue, double minValue, boolean dynamic, Scope scope) { + public static Setting doubleSetting(String key, double defaultValue, double minValue, Property... properties) { return new Setting<>(key, (s) -> Double.toString(defaultValue), (s) -> { final double d = Double.parseDouble(s); if (d < minValue) { throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); } return d; - }, dynamic, scope); + }, properties); } @Override @@ -604,8 +747,9 @@ public class Setting extends ToXContentToBytes { * can easily be added with this setting. Yet, prefix key settings don't support updaters out of the box unless * {@link #getConcreteSetting(String)} is used to pull the updater. */ - public static Setting prefixKeySetting(String prefix, String defaultValue, Function parser, boolean dynamic, Scope scope) { - return affixKeySetting(AffixKey.withPrefix(prefix), (s) -> defaultValue, parser, dynamic, scope); + public static Setting prefixKeySetting(String prefix, String defaultValue, Function parser, + Property... properties) { + return affixKeySetting(AffixKey.withPrefix(prefix), (s) -> defaultValue, parser, properties); } /** @@ -613,16 +757,19 @@ public class Setting extends ToXContentToBytes { * storage.${backend}.enable=[true|false] can easily be added with this setting. Yet, adfix key settings don't support updaters * out of the box unless {@link #getConcreteSetting(String)} is used to pull the updater. */ - public static Setting adfixKeySetting(String prefix, String suffix, Function defaultValue, Function parser, boolean dynamic, Scope scope) { - return affixKeySetting(AffixKey.withAdfix(prefix, suffix), defaultValue, parser, dynamic, scope); + public static Setting adfixKeySetting(String prefix, String suffix, Function defaultValue, + Function parser, Property... properties) { + return affixKeySetting(AffixKey.withAdfix(prefix, suffix), defaultValue, parser, properties); } - public static Setting adfixKeySetting(String prefix, String suffix, String defaultValue, Function parser, boolean dynamic, Scope scope) { - return adfixKeySetting(prefix, suffix, (s) -> defaultValue, parser, dynamic, scope); + public static Setting adfixKeySetting(String prefix, String suffix, String defaultValue, Function parser, + Property... properties) { + return adfixKeySetting(prefix, suffix, (s) -> defaultValue, parser, properties); } - public static Setting affixKeySetting(AffixKey key, Function defaultValue, Function parser, boolean dynamic, Scope scope) { - return new Setting(key, defaultValue, parser, dynamic, scope) { + public static Setting affixKeySetting(AffixKey key, Function defaultValue, Function parser, + Property... 
properties) { + return new Setting(key, defaultValue, parser, properties) { @Override boolean isGroupSetting() { @@ -637,7 +784,7 @@ public class Setting extends ToXContentToBytes { @Override public Setting getConcreteSetting(String key) { if (match(key)) { - return new Setting<>(key, defaultValue, parser, dynamic, scope); + return new Setting<>(key, defaultValue, parser, properties); } else { throw new IllegalArgumentException("key [" + key + "] must match [" + getKey() + "] but didn't."); } diff --git a/core/src/main/java/org/elasticsearch/common/settings/Settings.java b/core/src/main/java/org/elasticsearch/common/settings/Settings.java index aafaff3e9d7..ce79bf92d20 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -761,6 +761,14 @@ public final class Settings implements ToXContent { return builder; } + /** + * Returns true if this settings object contains no settings + * @return true if this settings object contains no settings + */ + public boolean isEmpty() { + return this.settings.isEmpty(); + } + /** * A builder allowing to put different settings and then {@link #build()} an immutable * settings implementation. Use {@link Settings#settingsBuilder()} in order to @@ -1136,10 +1144,10 @@ public final class Settings implements ToXContent { * @param properties The properties to put * @return The builder */ - public Builder putProperties(String prefix, Dictionary properties) { - for (Object key1 : Collections.list(properties.keys())) { - String key = Objects.toString(key1); - String value = Objects.toString(properties.get(key)); + public Builder putProperties(String prefix, Dictionary properties) { + for (Object property : Collections.list(properties.keys())) { + String key = Objects.toString(property); + String value = Objects.toString(properties.get(property)); if (key.startsWith(prefix)) { map.put(key.substring(prefix.length()), value); } @@ -1154,19 +1162,12 @@ public final class Settings implements ToXContent { * @param properties The properties to put * @return The builder */ - public Builder putProperties(String prefix, Dictionary properties, String[] ignorePrefixes) { - for (Object key1 : Collections.list(properties.keys())) { - String key = Objects.toString(key1); - String value = Objects.toString(properties.get(key)); + public Builder putProperties(String prefix, Dictionary properties, String ignorePrefix) { + for (Object property : Collections.list(properties.keys())) { + String key = Objects.toString(property); + String value = Objects.toString(properties.get(property)); if (key.startsWith(prefix)) { - boolean ignore = false; - for (String ignorePrefix : ignorePrefixes) { - if (key.startsWith(ignorePrefix)) { - ignore = true; - break; - } - } - if (!ignore) { + if (!key.startsWith(ignorePrefix)) { map.put(key.substring(prefix.length()), value); } } @@ -1220,7 +1221,7 @@ public final class Settings implements ToXContent { } }; for (Map.Entry entry : new HashMap<>(map).entrySet()) { - String value = propertyPlaceholder.replacePlaceholders(entry.getValue(), placeholderResolver); + String value = propertyPlaceholder.replacePlaceholders(entry.getKey(), entry.getValue(), placeholderResolver); // if the values exists and has length, we should maintain it in the map // otherwise, the replace process resolved into removing it if (Strings.hasLength(value)) { diff --git a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java 
b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java
index b06f53459c8..2e7acd6ae8c 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java
@@ -20,13 +20,22 @@ package org.elasticsearch.common.settings;
 
 import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.tribe.TribeService;
 
+import java.io.IOException;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 import java.util.function.Predicate;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
 
 /**
  * A module that binds the provided settings to the {@link Settings} interface.
@@ -35,11 +44,14 @@ public class SettingsModule extends AbstractModule {
 
     private final Settings settings;
     private final Set<String> settingsFilterPattern = new HashSet<>();
-    private final Map<String, Setting<?>> clusterSettings = new HashMap<>();
+    private final Map<String, Setting<?>> nodeSettings = new HashMap<>();
     private final Map<String, Setting<?>> indexSettings = new HashMap<>();
-    private static final Predicate<String> TRIBE_CLIENT_NODE_SETTINGS_PREDICATE = (s) -> s.startsWith("tribe.") && TribeService.TRIBE_SETTING_KEYS.contains(s) == false;
+    private static final Predicate<String> TRIBE_CLIENT_NODE_SETTINGS_PREDICATE = (s) -> s.startsWith("tribe.")
+        && TribeService.TRIBE_SETTING_KEYS.contains(s) == false;
+    private final ESLogger logger;
 
     public SettingsModule(Settings settings) {
+        logger = Loggers.getLogger(getClass(), settings);
         this.settings = settings;
         for (Setting<?> setting : ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) {
             registerSetting(setting);
@@ -52,10 +64,59 @@ public class SettingsModule extends AbstractModule {
     @Override
     protected void configure() {
         final IndexScopedSettings indexScopedSettings = new IndexScopedSettings(settings, new HashSet<>(this.indexSettings.values()));
-        final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(this.clusterSettings.values()));
+        final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(this.nodeSettings.values()));
+        Settings indexSettings = settings.filter((s) -> s.startsWith("index.") && clusterSettings.get(s) == null);
+        if (indexSettings.isEmpty() == false) {
+            try {
+                String separator = IntStream.range(0, 85).mapToObj(s -> "*").collect(Collectors.joining("")).trim();
+                StringBuilder builder = new StringBuilder();
+                builder.append(System.lineSeparator());
+                builder.append(separator);
+                builder.append(System.lineSeparator());
+                builder.append("Found index level settings on node level configuration.");
+                builder.append(System.lineSeparator());
+                builder.append(System.lineSeparator());
+                int count = 0;
+                for (String word : ("Since Elasticsearch 5.x index level settings can NOT be set on the nodes configuration like " +
+                    "the elasticsearch.yaml, in system properties or command line arguments. " +
+                    "In order to upgrade all indices the settings must be updated via the /${index}/_settings API. " +
+                    "Unless all settings are dynamic all indices must be closed in order to apply the upgrade. " +
+                    "Indices created in the future should use index templates to set default values."
+ ).split(" ")) { + if (count + word.length() > 85) { + builder.append(System.lineSeparator()); + count = 0; + } + count += word.length() + 1; + builder.append(word).append(" "); + } + + builder.append(System.lineSeparator()); + builder.append(System.lineSeparator()); + builder.append("Please ensure all required values are updated on all indices by executing: "); + builder.append(System.lineSeparator()); + builder.append(System.lineSeparator()); + builder.append("curl -XPUT 'http://localhost:9200/_all/_settings?preserve_existing=true' -d '"); + try (XContentBuilder xContentBuilder = XContentBuilder.builder(XContentType.JSON.xContent())) { + xContentBuilder.prettyPrint(); + xContentBuilder.startObject(); + indexSettings.toXContent(xContentBuilder, new ToXContent.MapParams(Collections.singletonMap("flat_settings", "true"))); + xContentBuilder.endObject(); + builder.append(xContentBuilder.string()); + } + builder.append("'"); + builder.append(System.lineSeparator()); + builder.append(separator); + builder.append(System.lineSeparator()); + + logger.warn(builder.toString()); + throw new IllegalArgumentException("node settings must not contain any index level settings"); + } catch (IOException e) { + throw new RuntimeException(e); + } + } // by now we are fully configured, lets check node level settings for unregistered index settings - indexScopedSettings.validate(settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE)); - final Predicate acceptOnlyClusterSettings = TRIBE_CLIENT_NODE_SETTINGS_PREDICATE.or(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE).negate(); + final Predicate acceptOnlyClusterSettings = TRIBE_CLIENT_NODE_SETTINGS_PREDICATE.negate(); clusterSettings.validate(settings.filter(acceptOnlyClusterSettings)); validateTribeSettings(settings, clusterSettings); bind(Settings.class).toInstance(settings); @@ -71,19 +132,26 @@ public class SettingsModule extends AbstractModule { * the setting during startup. 
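     *
     * A typical registration, shown here only as an editorial sketch (the plugin setting
     * name is hypothetical; the API is the property-based one introduced in this patch):
     *
     *     Setting<Boolean> MY_SETTING =
     *         Setting.boolSetting("my_plugin.enabled", false, Property.NodeScope, Property.Dynamic);
     *     settingsModule.registerSetting(MY_SETTING);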
*/ public void registerSetting(Setting setting) { - switch (setting.getScope()) { - case CLUSTER: - if (clusterSettings.containsKey(setting.getKey())) { + if (setting.isFiltered()) { + if (settingsFilterPattern.contains(setting.getKey()) == false) { + registerSettingsFilter(setting.getKey()); + } + } + if (setting.hasNodeScope() || setting.hasIndexScope()) { + if (setting.hasNodeScope()) { + if (nodeSettings.containsKey(setting.getKey())) { throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice"); } - clusterSettings.put(setting.getKey(), setting); - break; - case INDEX: + nodeSettings.put(setting.getKey(), setting); + } + if (setting.hasIndexScope()) { if (indexSettings.containsKey(setting.getKey())) { throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice"); } indexSettings.put(setting.getKey(), setting); - break; + } + } else { + throw new IllegalArgumentException("No scope found for setting [" + setting.getKey() + "]"); } } @@ -101,21 +169,15 @@ public class SettingsModule extends AbstractModule { settingsFilterPattern.add(filter); } - public void registerSettingsFilterIfMissing(String filter) { - if (settingsFilterPattern.contains(filter) == false) { - registerSettingsFilter(filter); - } - } - /** * Check if a setting has already been registered */ public boolean exists(Setting setting) { - switch (setting.getScope()) { - case CLUSTER: - return clusterSettings.containsKey(setting.getKey()); - case INDEX: - return indexSettings.containsKey(setting.getKey()); + if (setting.hasNodeScope()) { + return nodeSettings.containsKey(setting.getKey()); + } + if (setting.hasIndexScope()) { + return indexSettings.containsKey(setting.getKey()); } throw new IllegalArgumentException("setting scope is unknown. This should never happen!"); } diff --git a/core/src/main/java/org/elasticsearch/common/settings/loader/JsonSettingsLoader.java b/core/src/main/java/org/elasticsearch/common/settings/loader/JsonSettingsLoader.java index f6f77192c75..02f7a5c37a0 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/loader/JsonSettingsLoader.java +++ b/core/src/main/java/org/elasticsearch/common/settings/loader/JsonSettingsLoader.java @@ -27,6 +27,10 @@ import org.elasticsearch.common.xcontent.XContentType; */ public class JsonSettingsLoader extends XContentSettingsLoader { + public JsonSettingsLoader(boolean allowNullValues) { + super(allowNullValues); + } + @Override public XContentType contentType() { return XContentType.JSON; diff --git a/core/src/main/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoader.java b/core/src/main/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoader.java index 57c9419f5b2..6ee1f58cf4b 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoader.java +++ b/core/src/main/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoader.java @@ -24,10 +24,12 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.io.FastStringReader; import org.elasticsearch.common.io.stream.StreamInput; +import java.io.Closeable; import java.io.IOException; import java.util.HashMap; import java.util.Map; import java.util.Properties; +import java.util.function.Supplier; /** * Settings loader that loads (parses) the settings in a properties format. 
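 */

The next hunk rewrites PropertiesSettingsLoader so that the String and byte[] overloads share one
helper instead of duplicating the parse-and-copy logic. A minimal sketch of the shape of that
helper (names taken from the hunk below, bodies simplified; cleanup uses the same
IOUtils.closeWhileHandlingException call as the patch):

    @FunctionalInterface
    interface IOExceptionThrowingBiConsumer<T, U> {
        void accept(T t, U u) throws IOException;
    }

    private <T extends Closeable> Map<String, String> load(
            Supplier<T> sourceSupplier, IOExceptionThrowingBiConsumer<T, Properties> reader) throws IOException {
        T source = null;
        try {
            source = sourceSupplier.get();
            final Properties props = new NoDuplicatesProperties();
            reader.accept(source, props);                  // e.g. props.load(reader) or props.load(stream)
            final Map<String, String> result = new HashMap<>();
            for (Map.Entry<Object, Object> entry : props.entrySet()) {
                result.put((String) entry.getKey(), (String) entry.getValue());
            }
            return result;
        } finally {
            IOUtils.closeWhileHandlingException(source);   // close quietly even on failure
        }
    }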
@@ -36,42 +38,49 @@ public class PropertiesSettingsLoader implements SettingsLoader { @Override public Map load(String source) throws IOException { - Properties props = new NoDuplicatesProperties(); - FastStringReader reader = new FastStringReader(source); - try { - props.load(reader); - Map result = new HashMap<>(); - for (Map.Entry entry : props.entrySet()) { - result.put((String) entry.getKey(), (String) entry.getValue()); - } - return result; - } finally { - IOUtils.closeWhileHandlingException(reader); - } + return load(() -> new FastStringReader(source), (reader, props) -> props.load(reader)); } @Override public Map load(byte[] source) throws IOException { - Properties props = new NoDuplicatesProperties(); - StreamInput stream = StreamInput.wrap(source); + return load(() -> StreamInput.wrap(source), (inStream, props) -> props.load(inStream)); + } + + private final Map load( + Supplier supplier, + IOExceptionThrowingBiConsumer properties + ) throws IOException { + T t = null; try { - props.load(stream); - Map result = new HashMap<>(); + t = supplier.get(); + final Properties props = new NoDuplicatesProperties(); + properties.accept(t, props); + final Map result = new HashMap<>(); for (Map.Entry entry : props.entrySet()) { result.put((String) entry.getKey(), (String) entry.getValue()); } return result; } finally { - IOUtils.closeWhileHandlingException(stream); + IOUtils.closeWhileHandlingException(t); } } + @FunctionalInterface + private interface IOExceptionThrowingBiConsumer { + void accept(T t, U u) throws IOException; + } + class NoDuplicatesProperties extends Properties { @Override public synchronized Object put(Object key, Object value) { - Object previousValue = super.put(key, value); + final Object previousValue = super.put(key, value); if (previousValue != null) { - throw new ElasticsearchParseException("duplicate settings key [{}] found, previous value [{}], current value [{}]", key, previousValue, value); + throw new ElasticsearchParseException( + "duplicate settings key [{}] found, previous value [{}], current value [{}]", + key, + previousValue, + value + ); } return previousValue; } diff --git a/core/src/main/java/org/elasticsearch/common/settings/loader/SettingsLoaderFactory.java b/core/src/main/java/org/elasticsearch/common/settings/loader/SettingsLoaderFactory.java index e55cb1092f2..5bf9916ee0e 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/loader/SettingsLoaderFactory.java +++ b/core/src/main/java/org/elasticsearch/common/settings/loader/SettingsLoaderFactory.java @@ -20,43 +20,63 @@ package org.elasticsearch.common.settings.loader; /** - * A settings loader factory automatically trying to identify what type of - * {@link SettingsLoader} to use. - * - * + * A class holding factory methods for settings loaders that attempts + * to infer the type of the underlying settings content. */ public final class SettingsLoaderFactory { private SettingsLoaderFactory() { - } /** - * Returns a {@link SettingsLoader} based on the resource name. + * Returns a {@link SettingsLoader} based on the source resource + * name. This factory method assumes that if the resource name ends + * with ".json" then the content should be parsed as JSON, else if + * the resource name ends with ".yml" or ".yaml" then the content + * should be parsed as YAML, else if the resource name ends with + * ".properties" then the content should be parsed as properties, + * otherwise default to attempting to parse as JSON. 
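+     * For example (illustrative only; sourceText stands for the file contents):
+     *
+     *     SettingsLoader loader = SettingsLoaderFactory.loaderFromResource("config/elasticsearch.yml");
+     *     Map<String, String> settings = loader.load(sourceText);   // parsed as YAML
+     *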
Note that the + * parsers returned by this method will not accept null-valued + * keys. + * + * @param resourceName The resource name containing the settings + * content. + * @return A settings loader. */ public static SettingsLoader loaderFromResource(String resourceName) { if (resourceName.endsWith(".json")) { - return new JsonSettingsLoader(); + return new JsonSettingsLoader(false); } else if (resourceName.endsWith(".yml") || resourceName.endsWith(".yaml")) { - return new YamlSettingsLoader(); + return new YamlSettingsLoader(false); } else if (resourceName.endsWith(".properties")) { return new PropertiesSettingsLoader(); } else { // lets default to the json one - return new JsonSettingsLoader(); + return new JsonSettingsLoader(false); } } /** - * Returns a {@link SettingsLoader} based on the actual settings source. + * Returns a {@link SettingsLoader} based on the source content. + * This factory method assumes that if the underlying content + * contains an opening and closing brace ('{' and '}') then the + * content should be parsed as JSON, else if the underlying content + * fails this condition but contains a ':' then the content should + * be parsed as YAML, and otherwise should be parsed as properties. + * Note that the JSON and YAML parsers returned by this method will + * accept null-valued keys. + * + * @param source The underlying settings content. + * @return A settings loader. */ public static SettingsLoader loaderFromSource(String source) { if (source.indexOf('{') != -1 && source.indexOf('}') != -1) { - return new JsonSettingsLoader(); + return new JsonSettingsLoader(true); } if (source.indexOf(':') != -1) { - return new YamlSettingsLoader(); + return new YamlSettingsLoader(true); } return new PropertiesSettingsLoader(); } + } diff --git a/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java b/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java index 9c2f973b96e..3875c1ef85a 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java +++ b/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java @@ -38,6 +38,12 @@ public abstract class XContentSettingsLoader implements SettingsLoader { public abstract XContentType contentType(); + private final boolean allowNullValues; + + XContentSettingsLoader(boolean allowNullValues) { + this.allowNullValues = allowNullValues; + } + @Override public Map load(String source) throws IOException { try (XContentParser parser = XContentFactory.xContent(contentType()).createParser(source)) { @@ -153,6 +159,16 @@ public abstract class XContentSettingsLoader implements SettingsLoader { currentValue ); } + + if (currentValue == null && !allowNullValues) { + throw new ElasticsearchParseException( + "null-valued setting found for key [{}] found at line number [{}], column number [{}]", + key, + parser.getTokenLocation().lineNumber, + parser.getTokenLocation().columnNumber + ); + } + settings.put(key, currentValue); } } diff --git a/core/src/main/java/org/elasticsearch/common/settings/loader/YamlSettingsLoader.java b/core/src/main/java/org/elasticsearch/common/settings/loader/YamlSettingsLoader.java index 248fe090b5d..12cde976691 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/loader/YamlSettingsLoader.java +++ b/core/src/main/java/org/elasticsearch/common/settings/loader/YamlSettingsLoader.java @@ -30,6 +30,10 @@ import java.util.Map; */ public class YamlSettingsLoader extends 
XContentSettingsLoader { + public YamlSettingsLoader(boolean allowNullValues) { + super(allowNullValues); + } + @Override public XContentType contentType() { return XContentType.YAML; diff --git a/core/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java b/core/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java index d0e91646c0f..b34c1101f9b 100644 --- a/core/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java +++ b/core/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java @@ -210,34 +210,6 @@ public enum DistanceUnit implements Writeable { return defaultUnit; } - /** - * Write a {@link DistanceUnit} to a {@link StreamOutput} - * - * @param out {@link StreamOutput} to write to - * @param unit {@link DistanceUnit} to write - */ - public static void writeDistanceUnit(StreamOutput out, DistanceUnit unit) throws IOException { - out.writeByte((byte) unit.ordinal()); - } - - /** - * Read a {@link DistanceUnit} from a {@link StreamInput} - * - * @param in {@link StreamInput} to read the {@link DistanceUnit} from - * @return {@link DistanceUnit} read from the {@link StreamInput} - * @throws IOException if no unit can be read from the {@link StreamInput} - * @throws IllegalArgumentException if no matching {@link DistanceUnit} can be found - */ - public static DistanceUnit readDistanceUnit(StreamInput in) throws IOException { - byte b = in.readByte(); - - if(b<0 || b>=values().length) { - throw new IllegalArgumentException("No type for distance unit matching [" + b + "]"); - } else { - return values()[b]; - } - } - /** * This class implements a value+unit tuple. */ @@ -324,23 +296,30 @@ public enum DistanceUnit implements Writeable { } } - private static final DistanceUnit PROTOTYPE = DEFAULT; + /** + * Read a {@link DistanceUnit} from a {@link StreamInput}. + * + * @param in {@link StreamInput} to read the {@link DistanceUnit} from + * @return {@link DistanceUnit} read from the {@link StreamInput} + * @throws IOException if no unit can be read from the {@link StreamInput} + * @throws IllegalArgumentException if no matching {@link DistanceUnit} can be found + */ + public static DistanceUnit readFromStream(StreamInput in) throws IOException { + byte b = in.readByte(); - @Override - public DistanceUnit readFrom(StreamInput in) throws IOException { - int ordinal = in.readVInt(); - if (ordinal < 0 || ordinal >= values().length) { - throw new IOException("Unknown DistanceUnit ordinal [" + ordinal + "]"); + if (b < 0 || b >= values().length) { + throw new IllegalArgumentException("No type for distance unit matching [" + b + "]"); } - return values()[ordinal]; - } - - public static DistanceUnit readUnitFrom(StreamInput in) throws IOException { - return PROTOTYPE.readFrom(in); + return values()[b]; } + /** + * Write a {@link DistanceUnit} to a {@link StreamOutput}. + * + * @param out {@link StreamOutput} to write to + */ @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(this.ordinal()); + out.writeByte((byte) this.ordinal()); } } diff --git a/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java b/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java new file mode 100644 index 00000000000..221dc234511 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java @@ -0,0 +1,132 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.util; + +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; + +/** + * Renames index folders from {index.name} to {index.uuid} + */ +public class IndexFolderUpgrader { + private final NodeEnvironment nodeEnv; + private final Settings settings; + private final ESLogger logger = Loggers.getLogger(IndexFolderUpgrader.class); + + /** + * Creates a new upgrader instance + * @param settings node settings + * @param nodeEnv the node env to operate on + */ + IndexFolderUpgrader(Settings settings, NodeEnvironment nodeEnv) { + this.settings = settings; + this.nodeEnv = nodeEnv; + } + + /** + * Moves the index folder found in source to target + */ + void upgrade(final Index index, final Path source, final Path target) throws IOException { + boolean success = false; + try { + Files.move(source, target, StandardCopyOption.ATOMIC_MOVE); + success = true; + } catch (NoSuchFileException | FileNotFoundException exception) { + // thrown when the source is non-existent because the folder was renamed + // by another node (shared FS) after we checked if the target exists + logger.error("multiple nodes trying to upgrade [{}] in parallel, retry upgrading with single node", + exception, target); + throw exception; + } finally { + if (success) { + logger.info("{} moved from [{}] to [{}]", index, source, target); + logger.trace("{} syncing directory [{}]", index, target); + IOUtils.fsync(target, true); + } + } + } + + /** + * Renames indexFolderName index folders found in node paths and custom path + * iff {@link #needsUpgrade(Index, String)} is true. + * Index folder in custom paths are renamed first followed by index folders in each node path. 
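+     *
+     * Callers normally go through the static entry point at the bottom of this class rather than
+     * invoking this directly; a minimal sketch (assuming an initialized NodeEnvironment):
+     *
+     *     IndexFolderUpgrader.upgradeIndicesIfNeeded(settings, nodeEnvironment);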
+     */
+    void upgrade(final String indexFolderName) throws IOException {
+        for (NodeEnvironment.NodePath nodePath : nodeEnv.nodePaths()) {
+            final Path indexFolderPath = nodePath.indicesPath.resolve(indexFolderName);
+            final IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, indexFolderPath);
+            if (indexMetaData != null) {
+                final Index index = indexMetaData.getIndex();
+                if (needsUpgrade(index, indexFolderName)) {
+                    logger.info("{} upgrading [{}] to new naming convention", index, indexFolderPath);
+                    final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings);
+                    if (indexSettings.hasCustomDataPath()) {
+                        // we rename the index folder in the custom path before renaming it in any node path
+                        // to have the index state under a not-yet-upgraded index folder, which we use to
+                        // continue renaming after an incomplete upgrade.
+                        final Path customLocationSource = nodeEnv.resolveBaseCustomLocation(indexSettings)
+                            .resolve(indexFolderName);
+                        final Path customLocationTarget = customLocationSource.resolveSibling(index.getUUID());
+                        // we rename the folder in the custom path only the first time we encounter a state
+                        // in a node path that needs upgrading; it is a no-op for subsequent node paths
+                        if (Files.exists(customLocationSource) // might not exist if no data was written for this index
+                            && Files.exists(customLocationTarget) == false) {
+                            upgrade(index, customLocationSource, customLocationTarget);
+                        } else {
+                            logger.info("[{}] no upgrade needed - already upgraded", customLocationTarget);
+                        }
+                    }
+                    upgrade(index, indexFolderPath, indexFolderPath.resolveSibling(index.getUUID()));
+                } else {
+                    logger.debug("[{}] no upgrade needed - already upgraded", indexFolderPath);
+                }
+            } else {
+                logger.warn("[{}] no index state found - ignoring", indexFolderPath);
+            }
+        }
+    }
+
+    /**
+     * Upgrades all indices found under nodeEnv. Already upgraded indices are ignored.
+     */
+    public static void upgradeIndicesIfNeeded(final Settings settings, final NodeEnvironment nodeEnv) throws IOException {
+        final IndexFolderUpgrader upgrader = new IndexFolderUpgrader(settings, nodeEnv);
+        for (String indexFolderName : nodeEnv.availableIndexFolders()) {
+            upgrader.upgrade(indexFolderName);
+        }
+    }
+
+    static boolean needsUpgrade(Index index, String indexFolderName) {
+        return indexFolderName.equals(index.getUUID()) == false;
+    }
+}
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java
index 10b1412425c..df1288d4fd2 100644
--- a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java
+++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java
@@ -20,6 +20,7 @@ package org.elasticsearch.common.util.concurrent;
 
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 
 import java.util.Arrays;
@@ -41,7 +42,8 @@ public class EsExecutors {
     /**
      * Settings key to manually set the number of available processors.
      * This is used to adjust thread pool sizes etc. per node.
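     *
     * Reading the value is unchanged under the new Property-based API (sketch):
     *
     *     int processors = EsExecutors.PROCESSORS_SETTING.get(settings);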
*/ - public static final Setting PROCESSORS_SETTING = Setting.intSetting("processors", Math.min(32, Runtime.getRuntime().availableProcessors()), 1, false, Setting.Scope.CLUSTER) ; + public static final Setting PROCESSORS_SETTING = + Setting.intSetting("processors", Math.min(32, Runtime.getRuntime().availableProcessors()), 1, Property.NodeScope); /** * Returns the number of processors available but at most 32. diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java index fde8d828295..2f664679bb4 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java @@ -40,11 +40,14 @@ public class EsThreadPoolExecutor extends ThreadPoolExecutor { */ private final String name; - EsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue workQueue, ThreadFactory threadFactory, ThreadContext contextHolder) { + EsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, + BlockingQueue workQueue, ThreadFactory threadFactory, ThreadContext contextHolder) { this(name, corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, new EsAbortPolicy(), contextHolder); } - EsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue workQueue, ThreadFactory threadFactory, XRejectedExecutionHandler handler, ThreadContext contextHolder) { + EsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, + BlockingQueue workQueue, ThreadFactory threadFactory, XRejectedExecutionHandler handler, + ThreadContext contextHolder) { super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, handler); this.name = name; this.contextHolder = contextHolder; @@ -133,112 +136,10 @@ public class EsThreadPoolExecutor extends ThreadPoolExecutor { } protected Runnable wrapRunnable(Runnable command) { - final Runnable wrappedCommand; - if (command instanceof AbstractRunnable) { - wrappedCommand = new FilterAbstractRunnable(contextHolder, (AbstractRunnable) command); - } else { - wrappedCommand = new FilterRunnable(contextHolder, command); - } - return wrappedCommand; + return contextHolder.preserveContext(command); } protected Runnable unwrap(Runnable runnable) { - if (runnable instanceof FilterAbstractRunnable) { - return ((FilterAbstractRunnable) runnable).in; - } else if (runnable instanceof FilterRunnable) { - return ((FilterRunnable) runnable).in; - } - return runnable; + return contextHolder.unwrap(runnable); } - - private class FilterAbstractRunnable extends AbstractRunnable { - private final ThreadContext contextHolder; - private final AbstractRunnable in; - private final ThreadContext.StoredContext ctx; - - FilterAbstractRunnable(ThreadContext contextHolder, AbstractRunnable in) { - this.contextHolder = contextHolder; - ctx = contextHolder.newStoredContext(); - this.in = in; - } - - @Override - public boolean isForceExecution() { - return in.isForceExecution(); - } - - @Override - public void onAfter() { - in.onAfter(); - } - - @Override - public void onFailure(Throwable t) { - in.onFailure(t); - } - - @Override - public void onRejection(Throwable t) { - in.onRejection(t); - } - - @Override - protected void 
doRun() throws Exception { - boolean whileRunning = false; - try (ThreadContext.StoredContext ingore = contextHolder.stashContext()){ - ctx.restore(); - whileRunning = true; - in.doRun(); - whileRunning = false; - } catch (IllegalStateException ex) { - if (whileRunning || isShutdown() == false) { - throw ex; - } - // if we hit an ISE here we have been shutting down - // this comes from the threadcontext and barfs if - // our threadpool has been shutting down - } - } - - @Override - public String toString() { - return in.toString(); - } - - } - - private class FilterRunnable implements Runnable { - private final ThreadContext contextHolder; - private final Runnable in; - private final ThreadContext.StoredContext ctx; - - FilterRunnable(ThreadContext contextHolder, Runnable in) { - this.contextHolder = contextHolder; - ctx = contextHolder.newStoredContext(); - this.in = in; - } - - @Override - public void run() { - boolean whileRunning = false; - try (ThreadContext.StoredContext ingore = contextHolder.stashContext()){ - ctx.restore(); - whileRunning = true; - in.run(); - whileRunning = false; - } catch (IllegalStateException ex) { - if (whileRunning || isShutdown() == false) { - throw ex; - } - // if we hit an ISE here we have been shutting down - // this comes from the threadcontext and barfs if - // our threadpool has been shutting down - } - } - @Override - public String toString() { - return in.toString(); - } - } - } diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java index 83bb9fd690d..5c30330c156 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java @@ -20,7 +20,10 @@ package org.elasticsearch.common.util.concurrent; +import org.elasticsearch.common.lease.Releasable; + import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.ReentrantLock; @@ -29,9 +32,8 @@ import java.util.concurrent.locks.ReentrantLock; * created the first time they are acquired and removed if no thread hold the * lock. The latter is important to assure that the list of locks does not grow * infinitely. - * - * A Thread can acquire a lock only once. 
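+ * With this change {@code acquire(key)} returns a {@code Releasable}, so callers hold the lock
+ * with try-with-resources (editorial sketch; the key value is illustrative):
+ *
+ *     try (Releasable ignored = keyedLock.acquire("index-1")) {
+ *         // work that must be exclusive for this key
+ *     }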
- *
+ *
+ *
  */
 public class KeyedLock<T> {
@@ -50,48 +52,38 @@ public class KeyedLock<T> {
 
     private final ConcurrentMap<T, KeyLock> map = ConcurrentCollections.newConcurrentMap();
 
-    protected final ThreadLocal<KeyLock> threadLocal = new ThreadLocal<>();
-
-    public void acquire(T key) {
+    public Releasable acquire(T key) {
+        assert isHeldByCurrentThread(key) == false : "lock for " + key + " is already held by this thread";
         while (true) {
-            if (threadLocal.get() != null) {
-                // if we are here, the thread already has the lock
-                throw new IllegalStateException("Lock already acquired in Thread" + Thread.currentThread().getId()
-                    + " for key " + key);
-            }
             KeyLock perNodeLock = map.get(key);
             if (perNodeLock == null) {
                 KeyLock newLock = new KeyLock(fair);
                 perNodeLock = map.putIfAbsent(key, newLock);
                 if (perNodeLock == null) {
                     newLock.lock();
-                    threadLocal.set(newLock);
-                    return;
+                    return new ReleasableLock(key, newLock);
                 }
             }
             assert perNodeLock != null;
             int i = perNodeLock.count.get();
             if (i > 0 && perNodeLock.count.compareAndSet(i, i + 1)) {
                 perNodeLock.lock();
-                threadLocal.set(perNodeLock);
-                return;
+                return new ReleasableLock(key, perNodeLock);
             }
         }
     }
 
-    public void release(T key) {
-        KeyLock lock = threadLocal.get();
+    public boolean isHeldByCurrentThread(T key) {
+        KeyLock lock = map.get(key);
         if (lock == null) {
-            throw new IllegalStateException("Lock not acquired");
+            return false;
         }
-        release(key, lock);
+        return lock.isHeldByCurrentThread();
     }
 
     void release(T key, KeyLock lock) {
-        assert lock.isHeldByCurrentThread();
         assert lock == map.get(key);
         lock.unlock();
-        threadLocal.set(null);
         int decrementAndGet = lock.count.decrementAndGet();
         if (decrementAndGet == 0) {
             map.remove(key, lock);
@@ -99,6 +91,24 @@ public class KeyedLock<T> {
         }
     }
 
+    private final class ReleasableLock implements Releasable {
+        final T key;
+        final KeyLock lock;
+        final AtomicBoolean closed = new AtomicBoolean();
+
+        private ReleasableLock(T key, KeyLock lock) {
+            this.key = key;
+            this.lock = lock;
+        }
+
+        @Override
+        public void close() {
+            if (closed.compareAndSet(false, true)) {
+                release(key, lock);
+            }
+        }
+    }
+
     @SuppressWarnings("serial")
     private final static class KeyLock extends ReentrantLock {
         KeyLock(boolean fair) {
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java
index 1928392fe41..462b4f539dc 100644
--- a/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java
+++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java
@@ -19,11 +19,11 @@ package org.elasticsearch.common.util.concurrent;
 
 import org.apache.lucene.util.CloseableThreadLocal;
-import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 
 import java.io.Closeable;
@@ -63,7 +63,7 @@ public final class ThreadContext implements Closeable, Writeable{
 
     public static final String PREFIX = "request.headers";
-    public static final Setting<Settings> DEFAULT_HEADERS_SETTING = Setting.groupSetting(PREFIX + ".", false, Setting.Scope.CLUSTER);
+    public static final Setting<Settings> DEFAULT_HEADERS_SETTING = Setting.groupSetting(PREFIX + ".", Property.NodeScope);
     private final Map<String, String> 
defaultHeader; private static final ThreadContextStruct DEFAULT_CONTEXT = new ThreadContextStruct(Collections.emptyMap()); private final ContextThreadLocal threadLocal; @@ -200,6 +200,36 @@ public final class ThreadContext implements Closeable, Writeablecommand has already been passed through this method then it is returned unaltered rather than wrapped twice. + */ + public Runnable preserveContext(Runnable command) { + if (command instanceof ContextPreservingAbstractRunnable) { + return command; + } + if (command instanceof ContextPreservingRunnable) { + return command; + } + if (command instanceof AbstractRunnable) { + return new ContextPreservingAbstractRunnable((AbstractRunnable) command); + } + return new ContextPreservingRunnable(command); + } + + /** + * Unwraps a command that was previously wrapped by {@link #preserveContext(Runnable)}. + */ + public Runnable unwrap(Runnable command) { + if (command instanceof ContextPreservingAbstractRunnable) { + return ((ContextPreservingAbstractRunnable) command).unwrap(); + } + if (command instanceof ContextPreservingRunnable) { + return ((ContextPreservingRunnable) command).unwrap(); + } + return command; + } + public interface StoredContext extends AutoCloseable { @Override void close(); @@ -356,4 +386,104 @@ public final class ThreadContext implements Closeable, Writeable readList(XContentParser parser, MapFactory mapFactory) throws IOException { XContentParser.Token token = parser.currentToken(); + if (token == null) { + token = parser.nextToken(); + } if (token == XContentParser.Token.FIELD_NAME) { token = parser.nextToken(); } if (token == XContentParser.Token.START_ARRAY) { token = parser.nextToken(); + } else { + throw new ElasticsearchParseException("Failed to parse list: expecting " + + XContentParser.Token.START_ARRAY + " but got " + token); } + ArrayList list = new ArrayList<>(); - for (; token != XContentParser.Token.END_ARRAY; token = parser.nextToken()) { + for (; token != null && token != XContentParser.Token.END_ARRAY; token = parser.nextToken()) { list.add(readValue(parser, mapFactory, token)); } return list; diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index e08757a3f2a..4076b880d6f 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.multibindings.Multibinder; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.ExtensionPoint; import org.elasticsearch.discovery.local.LocalDiscovery; @@ -45,10 +46,11 @@ import java.util.function.Function; */ public class DiscoveryModule extends AbstractModule { - public static final Setting DISCOVERY_TYPE_SETTING = new Setting<>("discovery.type", - settings -> DiscoveryNode.localNode(settings) ? "local" : "zen", Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting ZEN_MASTER_SERVICE_TYPE_SETTING = new Setting<>("discovery.zen.masterservice.type", - "zen", Function.identity(), false, Setting.Scope.CLUSTER); + public static final Setting DISCOVERY_TYPE_SETTING = + new Setting<>("discovery.type", settings -> DiscoveryNode.localNode(settings) ? 
"local" : "zen", Function.identity(), + Property.NodeScope); + public static final Setting ZEN_MASTER_SERVICE_TYPE_SETTING = + new Setting<>("discovery.zen.masterservice.type", "zen", Function.identity(), Property.NodeScope); private final Settings settings; private final Map>> unicastHostProviders = new HashMap<>(); diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java index b899f0a8afc..ca7ab342cd5 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.rest.RestStatus; @@ -42,16 +43,25 @@ public class DiscoverySettings extends AbstractComponent { * sets the timeout for a complete publishing cycle, including both sending and committing. the master * will continue to process the next cluster state update after this time has elapsed **/ - public static final Setting PUBLISH_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.publish_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER); + public static final Setting PUBLISH_TIMEOUT_SETTING = + Setting.positiveTimeSetting("discovery.zen.publish_timeout", TimeValue.timeValueSeconds(30), + Property.Dynamic, Property.NodeScope); /** * sets the timeout for receiving enough acks for a specific cluster state and committing it. failing * to receive responses within this window will cause the cluster state change to be rejected. 
*/ - public static final Setting COMMIT_TIMEOUT_SETTING = new Setting<>("discovery.zen.commit_timeout", (s) -> PUBLISH_TIMEOUT_SETTING.getRaw(s), (s) -> TimeValue.parseTimeValue(s, TimeValue.timeValueSeconds(30), "discovery.zen.commit_timeout"), true, Setting.Scope.CLUSTER); - public static final Setting NO_MASTER_BLOCK_SETTING = new Setting<>("discovery.zen.no_master_block", "write", DiscoverySettings::parseNoMasterBlock, true, Setting.Scope.CLUSTER); - public static final Setting PUBLISH_DIFF_ENABLE_SETTING = Setting.boolSetting("discovery.zen.publish_diff.enable", true, true, Setting.Scope.CLUSTER); - public static final Setting INITIAL_STATE_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.initial_state_timeout", TimeValue.timeValueSeconds(30), false, Setting.Scope.CLUSTER); + public static final Setting COMMIT_TIMEOUT_SETTING = + new Setting<>("discovery.zen.commit_timeout", (s) -> PUBLISH_TIMEOUT_SETTING.getRaw(s), + (s) -> TimeValue.parseTimeValue(s, TimeValue.timeValueSeconds(30), "discovery.zen.commit_timeout"), + Property.Dynamic, Property.NodeScope); + public static final Setting NO_MASTER_BLOCK_SETTING = + new Setting<>("discovery.zen.no_master_block", "write", DiscoverySettings::parseNoMasterBlock, + Property.Dynamic, Property.NodeScope); + public static final Setting PUBLISH_DIFF_ENABLE_SETTING = + Setting.boolSetting("discovery.zen.publish_diff.enable", true, Property.Dynamic, Property.NodeScope); + public static final Setting INITIAL_STATE_TIMEOUT_SETTING = + Setting.positiveTimeSetting("discovery.initial_state_timeout", TimeValue.timeValueSeconds(30), Property.NodeScope); private volatile ClusterBlock noMasterBlock; private volatile TimeValue publishTimeout; diff --git a/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java index 0462d6a8d8d..cf697871d35 100644 --- a/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java @@ -21,7 +21,6 @@ package org.elasticsearch.discovery.local; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.Diff; @@ -31,6 +30,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.BytesStreamOutput; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java index 19a2cf06bf4..0edbf8841ad 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java @@ -19,7 +19,6 @@ package org.elasticsearch.discovery.zen; import org.elasticsearch.ElasticsearchTimeoutException; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import 
org.elasticsearch.cluster.NotMasterException; @@ -28,7 +27,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.service.InternalClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; @@ -346,7 +345,7 @@ public class NodeJoinController extends AbstractComponent { } private void assertClusterStateThread() { - assert clusterService instanceof InternalClusterService == false || ((InternalClusterService) clusterService).assertClusterStateThread(); + assert clusterService instanceof ClusterService == false || ((ClusterService) clusterService).assertClusterStateThread(); } } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index c0dd78b4e5f..6e0f17812ce 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -24,7 +24,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.NotMasterException; @@ -35,7 +34,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.service.InternalClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.component.Lifecycle; @@ -46,6 +45,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.Discovery; @@ -71,6 +71,7 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -78,6 +79,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; @@ -86,17 +88,26 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; */ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery, PingContextProvider { - public final static Setting PING_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.ping_timeout", 
timeValueSeconds(3), false, Setting.Scope.CLUSTER); - public final static Setting JOIN_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.join_timeout", - settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER); - public final static Setting JOIN_RETRY_ATTEMPTS_SETTING = Setting.intSetting("discovery.zen.join_retry_attempts", 3, 1, false, Setting.Scope.CLUSTER); - public final static Setting JOIN_RETRY_DELAY_SETTING = Setting.positiveTimeSetting("discovery.zen.join_retry_delay", TimeValue.timeValueMillis(100), false, Setting.Scope.CLUSTER); - public final static Setting MAX_PINGS_FROM_ANOTHER_MASTER_SETTING = Setting.intSetting("discovery.zen.max_pings_from_another_master", 3, 1, false, Setting.Scope.CLUSTER); - public final static Setting SEND_LEAVE_REQUEST_SETTING = Setting.boolSetting("discovery.zen.send_leave_request", true, false, Setting.Scope.CLUSTER); - public final static Setting MASTER_ELECTION_FILTER_CLIENT_SETTING = Setting.boolSetting("discovery.zen.master_election.filter_client", true, false, Setting.Scope.CLUSTER); - public final static Setting MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.master_election.wait_for_joins_timeout", - settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER); - public final static Setting MASTER_ELECTION_FILTER_DATA_SETTING = Setting.boolSetting("discovery.zen.master_election.filter_data", false, false, Setting.Scope.CLUSTER); + public final static Setting PING_TIMEOUT_SETTING = + Setting.positiveTimeSetting("discovery.zen.ping_timeout", timeValueSeconds(3), Property.NodeScope); + public final static Setting JOIN_TIMEOUT_SETTING = + Setting.timeSetting("discovery.zen.join_timeout", + settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20).toString(), + TimeValue.timeValueMillis(0), Property.NodeScope); + public final static Setting JOIN_RETRY_ATTEMPTS_SETTING = + Setting.intSetting("discovery.zen.join_retry_attempts", 3, 1, Property.NodeScope); + public final static Setting JOIN_RETRY_DELAY_SETTING = + Setting.positiveTimeSetting("discovery.zen.join_retry_delay", TimeValue.timeValueMillis(100), Property.NodeScope); + public final static Setting MAX_PINGS_FROM_ANOTHER_MASTER_SETTING = + Setting.intSetting("discovery.zen.max_pings_from_another_master", 3, 1, Property.NodeScope); + public final static Setting SEND_LEAVE_REQUEST_SETTING = + Setting.boolSetting("discovery.zen.send_leave_request", true, Property.NodeScope); + public final static Setting MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING = + Setting.timeSetting("discovery.zen.master_election.wait_for_joins_timeout", + settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2).toString(), TimeValue.timeValueMillis(0), + Property.NodeScope); + public final static Setting MASTER_ELECTION_IGNORE_NON_MASTER_PINGS_SETTING = + Setting.boolSetting("discovery.zen.master_election.ignore_non_master_pings", false, Property.NodeScope); public static final String DISCOVERY_REJOIN_ACTION_NAME = "internal:discovery/zen/rejoin"; @@ -127,8 +138,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen private final ElectMasterService electMaster; - private final boolean masterElectionFilterClientNodes; - private final boolean masterElectionFilterDataNodes; + private final boolean 
masterElectionIgnoreNonMasters; private final TimeValue masterElectionWaitForJoinsTimeout; private final JoinThreadControl joinThreadControl; @@ -158,11 +168,11 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen this.maxPingsFromAnotherMaster = MAX_PINGS_FROM_ANOTHER_MASTER_SETTING.get(settings); this.sendLeaveRequest = SEND_LEAVE_REQUEST_SETTING.get(settings); - this.masterElectionFilterClientNodes = MASTER_ELECTION_FILTER_CLIENT_SETTING.get(settings); - this.masterElectionFilterDataNodes = MASTER_ELECTION_FILTER_DATA_SETTING.get(settings); + this.masterElectionIgnoreNonMasters = MASTER_ELECTION_IGNORE_NON_MASTER_PINGS_SETTING.get(settings); this.masterElectionWaitForJoinsTimeout = MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING.get(settings); - logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]", this.pingTimeout, joinTimeout, masterElectionFilterClientNodes, masterElectionFilterDataNodes); + logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.ignore_non_master [{}]", + this.pingTimeout, joinTimeout, masterElectionIgnoreNonMasters); clusterSettings.addSettingsUpdateConsumer(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, this::handleMinimumMasterNodesChanged, (value) -> { final ClusterState clusterState = clusterService.state(); @@ -823,7 +833,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen return null; } if (logger.isTraceEnabled()) { - StringBuilder sb = new StringBuilder("full ping responses:"); + StringBuilder sb = new StringBuilder(); if (fullPingResponses.length == 0) { sb.append(" {none}"); } else { @@ -831,33 +841,12 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen sb.append("\n\t--> ").append(pingResponse); } } - logger.trace(sb.toString()); + logger.trace("full ping responses:{}", sb); } // filter responses - List pingResponses = new ArrayList<>(); - for (ZenPing.PingResponse pingResponse : fullPingResponses) { - DiscoveryNode node = pingResponse.node(); - if (masterElectionFilterClientNodes && (node.clientNode() || (!node.masterNode() && !node.dataNode()))) { - // filter out the client node, which is a client node, or also one that is not data and not master (effectively, client) - } else if (masterElectionFilterDataNodes && (!node.masterNode() && node.dataNode())) { - // filter out data node that is not also master - } else { - pingResponses.add(pingResponse); - } - } - - if (logger.isDebugEnabled()) { - StringBuilder sb = new StringBuilder("filtered ping responses: (filter_client[").append(masterElectionFilterClientNodes).append("], filter_data[").append(masterElectionFilterDataNodes).append("])"); - if (pingResponses.isEmpty()) { - sb.append(" {none}"); - } else { - for (ZenPing.PingResponse pingResponse : pingResponses) { - sb.append("\n\t--> ").append(pingResponse); - } - } - logger.debug(sb.toString()); - } + final List pingResponses; + pingResponses = filterPingResponses(fullPingResponses, masterElectionIgnoreNonMasters, logger); final DiscoveryNode localNode = clusterService.localNode(); List pingMasters = new ArrayList<>(); @@ -913,12 +902,34 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } } + static List filterPingResponses(ZenPing.PingResponse[] fullPingResponses, boolean masterElectionIgnoreNonMasters, ESLogger logger) { + List pingResponses; + if (masterElectionIgnoreNonMasters) { + pingResponses = Arrays.stream(fullPingResponses).filter(ping -> 
ping.node().isMasterNode()).collect(Collectors.toList()); + } else { + pingResponses = Arrays.asList(fullPingResponses); + } + + if (logger.isDebugEnabled()) { + StringBuilder sb = new StringBuilder(); + if (pingResponses.isEmpty()) { + sb.append(" {none}"); + } else { + for (ZenPing.PingResponse pingResponse : pingResponses) { + sb.append("\n\t--> ").append(pingResponse); + } + } + logger.debug("filtered ping responses: (ignore_non_masters [{}]){}", masterElectionIgnoreNonMasters, sb); + } + return pingResponses; + } + protected ClusterState rejoin(ClusterState clusterState, String reason) { // *** called from within an cluster state update task *** // - assert Thread.currentThread().getName().contains(InternalClusterService.UPDATE_THREAD_NAME); + assert Thread.currentThread().getName().contains(ClusterService.UPDATE_THREAD_NAME); - logger.warn(reason + ", current nodes: {}", clusterState.nodes()); + logger.warn("{}, current nodes: {}", reason, clusterState.nodes()); nodesFD.stop(); masterFD.stop(reason); @@ -946,7 +957,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen private ClusterState handleAnotherMaster(ClusterState localClusterState, final DiscoveryNode otherMaster, long otherClusterStateVersion, String reason) { assert localClusterState.nodes().localNodeMaster() : "handleAnotherMaster called but current node is not a master"; - assert Thread.currentThread().getName().contains(InternalClusterService.UPDATE_THREAD_NAME) : "not called from the cluster state update thread"; + assert Thread.currentThread().getName().contains(ClusterService.UPDATE_THREAD_NAME) : "not called from the cluster state update thread"; if (otherClusterStateVersion > localClusterState.version()) { return rejoin(localClusterState, "zen-disco-discovered another master with a new cluster_state [" + otherMaster + "][" + reason + "]"); @@ -1184,7 +1195,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } private void assertClusterStateThread() { - assert clusterService instanceof InternalClusterService == false || ((InternalClusterService) clusterService).assertClusterStateThread(); + assert clusterService instanceof ClusterService == false || ((ClusterService) clusterService).assertClusterStateThread(); } } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java b/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java index 1482fb92a22..a3da8be5a94 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; @@ -40,7 +41,8 @@ import java.util.List; */ public class ElectMasterService extends AbstractComponent { - public static final Setting DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING = Setting.intSetting("discovery.zen.minimum_master_nodes", -1, true, Setting.Scope.CLUSTER); + public static final Setting DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING = + Setting.intSetting("discovery.zen.minimum_master_nodes", -1, Property.Dynamic, Property.NodeScope); // This is the minimum version a master needs to be 
on, otherwise it gets ignored // This is based on the minimum compatible version of the current version this node is on diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/fd/FaultDetection.java b/core/src/main/java/org/elasticsearch/discovery/zen/fd/FaultDetection.java index 62b0250315c..1cfd46634a5 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/fd/FaultDetection.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/fd/FaultDetection.java @@ -22,7 +22,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Scope; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.threadpool.ThreadPool; @@ -37,11 +37,16 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; */ public abstract class FaultDetection extends AbstractComponent { - public static final Setting CONNECT_ON_NETWORK_DISCONNECT_SETTING = Setting.boolSetting("discovery.zen.fd.connect_on_network_disconnect", false, false, Scope.CLUSTER); - public static final Setting PING_INTERVAL_SETTING = Setting.positiveTimeSetting("discovery.zen.fd.ping_interval", timeValueSeconds(1), false, Scope.CLUSTER); - public static final Setting PING_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.fd.ping_timeout", timeValueSeconds(30), false, Scope.CLUSTER); - public static final Setting PING_RETRIES_SETTING = Setting.intSetting("discovery.zen.fd.ping_retries", 3, false, Scope.CLUSTER); - public static final Setting REGISTER_CONNECTION_LISTENER_SETTING = Setting.boolSetting("discovery.zen.fd.register_connection_listener", true, false, Scope.CLUSTER); + public static final Setting CONNECT_ON_NETWORK_DISCONNECT_SETTING = + Setting.boolSetting("discovery.zen.fd.connect_on_network_disconnect", false, Property.NodeScope); + public static final Setting PING_INTERVAL_SETTING = + Setting.positiveTimeSetting("discovery.zen.fd.ping_interval", timeValueSeconds(1), Property.NodeScope); + public static final Setting PING_TIMEOUT_SETTING = + Setting.timeSetting("discovery.zen.fd.ping_timeout", timeValueSeconds(30), Property.NodeScope); + public static final Setting PING_RETRIES_SETTING = + Setting.intSetting("discovery.zen.fd.ping_retries", 3, Property.NodeScope); + public static final Setting REGISTER_CONNECTION_LISTENER_SETTING = + Setting.boolSetting("discovery.zen.fd.register_connection_listener", true, Property.NodeScope); protected final ThreadPool threadPool; protected final ClusterName clusterName; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java b/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java index 73be1d3bb28..96ed7f76419 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java @@ -21,12 +21,12 @@ package org.elasticsearch.discovery.zen.fd; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.NotMasterException; import 
org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java index 04af8207c37..de4caf664ea 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java @@ -19,9 +19,9 @@ package org.elasticsearch.discovery.zen.membership; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java b/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java index 427abca8d85..0e9b81ad1fc 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; @@ -86,8 +87,11 @@ import static org.elasticsearch.discovery.zen.ping.ZenPing.PingResponse.readPing public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPing { public static final String ACTION_NAME = "internal:discovery/zen/unicast"; - public static final Setting> DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING = Setting.listSetting("discovery.zen.ping.unicast.hosts", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING = Setting.intSetting("discovery.zen.ping.unicast.concurrent_connects", 10, 0, false, Setting.Scope.CLUSTER); + public static final Setting> DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING = + Setting.listSetting("discovery.zen.ping.unicast.hosts", Collections.emptyList(), Function.identity(), + Property.NodeScope); + public static final Setting DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING = + Setting.intSetting("discovery.zen.ping.unicast.concurrent_connects", 10, 0, Property.NodeScope); // these limits are per-address public static final int LIMIT_FOREIGN_PORTS_COUNT = 1; diff --git a/core/src/main/java/org/elasticsearch/env/Environment.java b/core/src/main/java/org/elasticsearch/env/Environment.java index 1f8cffc97f3..e022ce6ad2f 100644 --- a/core/src/main/java/org/elasticsearch/env/Environment.java +++ b/core/src/main/java/org/elasticsearch/env/Environment.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.SuppressForbidden; 
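The UnicastZenPing hunk above reformats the unicast host list and `discovery.zen.ping.unicast.concurrent_connects` (default 10) settings onto `Property.NodeScope`. As a hedged illustration of what a concurrent-connects cap of this kind can look like, here is a self-contained sketch that throttles fake ping attempts with a semaphore; `BoundedPinger` and its ping logic are invented for illustration and are not Elasticsearch code.

```java
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

// Hypothetical sketch: bound the number of in-flight connection attempts the
// way a "concurrent_connects" style setting caps unicast pinging.
public class BoundedPinger {
    private final Semaphore permits;
    private final ExecutorService executor = Executors.newCachedThreadPool();

    BoundedPinger(int concurrentConnects) {
        this.permits = new Semaphore(concurrentConnects);
    }

    void pingAll(List<String> hosts) {
        for (String host : hosts) {
            executor.submit(() -> {
                permits.acquireUninterruptibly(); // at most N connects at once
                try {
                    System.out.println("pinging " + host); // stand-in for a real connect
                } finally {
                    permits.release();
                }
            });
        }
        executor.shutdown();
    }

    public static void main(String[] args) {
        new BoundedPinger(10).pingAll(List.of("10.0.0.1:9300", "10.0.0.2:9300"));
    }
}
```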
import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.io.IOException; @@ -46,15 +47,17 @@ import static org.elasticsearch.common.Strings.cleanPath; // TODO: move PathUtils to be package-private here instead of // public+forbidden api! public class Environment { - public static final Setting PATH_HOME_SETTING = Setting.simpleString("path.home", false, Setting.Scope.CLUSTER); - public static final Setting PATH_CONF_SETTING = Setting.simpleString("path.conf", false, Setting.Scope.CLUSTER); - public static final Setting PATH_SCRIPTS_SETTING = Setting.simpleString("path.scripts", false, Setting.Scope.CLUSTER); - public static final Setting> PATH_DATA_SETTING = Setting.listSetting("path.data", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting PATH_LOGS_SETTING = Setting.simpleString("path.logs", false, Setting.Scope.CLUSTER); - public static final Setting PATH_PLUGINS_SETTING = Setting.simpleString("path.plugins", false, Setting.Scope.CLUSTER); - public static final Setting> PATH_REPO_SETTING = Setting.listSetting("path.repo", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting PATH_SHARED_DATA_SETTING = Setting.simpleString("path.shared_data", false, Setting.Scope.CLUSTER); - public static final Setting PIDFILE_SETTING = Setting.simpleString("pidfile", false, Setting.Scope.CLUSTER); + public static final Setting PATH_HOME_SETTING = Setting.simpleString("path.home", Property.NodeScope); + public static final Setting PATH_CONF_SETTING = Setting.simpleString("path.conf", Property.NodeScope); + public static final Setting PATH_SCRIPTS_SETTING = Setting.simpleString("path.scripts", Property.NodeScope); + public static final Setting> PATH_DATA_SETTING = + Setting.listSetting("path.data", Collections.emptyList(), Function.identity(), Property.NodeScope); + public static final Setting PATH_LOGS_SETTING = Setting.simpleString("path.logs", Property.NodeScope); + public static final Setting PATH_PLUGINS_SETTING = Setting.simpleString("path.plugins", Property.NodeScope); + public static final Setting> PATH_REPO_SETTING = + Setting.listSetting("path.repo", Collections.emptyList(), Function.identity(), Property.NodeScope); + public static final Setting PATH_SHARED_DATA_SETTING = Setting.simpleString("path.shared_data", Property.NodeScope); + public static final Setting PIDFILE_SETTING = Setting.simpleString("pidfile", Property.NodeScope); private final Settings settings; diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 0eec5c5765e..c6eec09b1c8 100644 --- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -36,7 +36,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Scope; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -49,7 +49,6 @@ import 
org.elasticsearch.index.store.FsDirectoryService; import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.monitor.fs.FsProbe; import org.elasticsearch.monitor.jvm.JvmInfo; -import org.elasticsearch.monitor.process.ProcessProbe; import java.io.Closeable; import java.io.IOException; @@ -71,7 +70,6 @@ import java.util.Set; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; import static java.util.Collections.unmodifiableSet; @@ -90,7 +88,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl * not running on Linux, or we hit an exception trying), True means the device possibly spins and False means it does not. */ public final Boolean spins; - public NodePath(Path path, Environment environment) throws IOException { + public NodePath(Path path) throws IOException { this.path = path; this.indicesPath = path.resolve(INDICES_FOLDER); this.fileStore = Environment.getFileStore(path); @@ -103,16 +101,18 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl /** * Resolves the given shards directory against this NodePath + * ${data.paths}/nodes/{node.id}/indices/{index.uuid}/{shard.id} */ public Path resolve(ShardId shardId) { return resolve(shardId.getIndex()).resolve(Integer.toString(shardId.id())); } /** - * Resolves the given indexes directory against this NodePath + * Resolves index directory against this NodePath + * ${data.paths}/nodes/{node.id}/indices/{index.uuid} */ public Path resolve(Index index) { - return indicesPath.resolve(index.getName()); + return indicesPath.resolve(index.getUUID()); } @Override @@ -132,25 +132,25 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl private final int localNodeId; private final AtomicBoolean closed = new AtomicBoolean(false); - private final Map shardLocks = new HashMap<>(); + private final Map shardLocks = new HashMap<>(); /** * Maximum number of data nodes that should run in an environment. */ - public static final Setting MAX_LOCAL_STORAGE_NODES_SETTING = Setting.intSetting("node.max_local_storage_nodes", 50, 1, false, - Scope.CLUSTER); + public static final Setting MAX_LOCAL_STORAGE_NODES_SETTING = Setting.intSetting("node.max_local_storage_nodes", 50, 1, + Property.NodeScope); /** * If true automatically append node id to custom data paths. */ - public static final Setting ADD_NODE_ID_TO_CUSTOM_PATH = Setting.boolSetting("node.add_id_to_custom_path", true, false, - Scope.CLUSTER); + public static final Setting ADD_NODE_ID_TO_CUSTOM_PATH = + Setting.boolSetting("node.add_id_to_custom_path", true, Property.NodeScope); /** * If true the [verbose] SegmentInfos.infoStream logging is sent to System.out. 
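The new `NodePath` javadocs above pin down the on-disk layout, `${data.paths}/nodes/{node.id}/indices/{index.uuid}/{shard.id}`, with `resolve(Index)` now keyed by the index UUID instead of its name. A small sketch of that resolution with plain `java.nio.file` types, using strings and ints in place of Elasticsearch's `Index` and `ShardId`:

```java
import java.nio.file.Path;
import java.nio.file.Paths;

// Sketch of the layout documented in the NodePath javadocs above:
// ${data.path}/nodes/{node.id}/indices/{index.uuid}/{shard.id}
// Keying index directories by UUID means re-creating an index with the same
// name can never collide with stale data left on disk.
public class NodePathSketch {
    public static Path indexDir(Path dataPath, int nodeId, String indexUuid) {
        return dataPath.resolve("nodes")
                .resolve(Integer.toString(nodeId))
                .resolve("indices")
                .resolve(indexUuid);
    }

    public static Path shardDir(Path dataPath, int nodeId, String indexUuid, int shardId) {
        return indexDir(dataPath, nodeId, indexUuid).resolve(Integer.toString(shardId));
    }

    public static void main(String[] args) {
        Path shard = shardDir(Paths.get("/var/lib/es"), 0, "Hs2dQyxKQYWq1L5u9Qe01g", 3);
        System.out.println(shard); // /var/lib/es/nodes/0/indices/Hs2dQyxKQYWq1L5u9Qe01g/3
    }
}
```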
*/ - public static final Setting ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING = Setting - .boolSetting("node.enable_lucene_segment_infos_trace", false, false, Scope.CLUSTER); + public static final Setting ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING = + Setting.boolSetting("node.enable_lucene_segment_infos_trace", false, Property.NodeScope); public static final String NODES_FOLDER = "nodes"; public static final String INDICES_FOLDER = "indices"; @@ -187,7 +187,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath()); try { locks[dirIndex] = luceneDir.obtainLock(NODE_LOCK_FILENAME); - nodePaths[dirIndex] = new NodePath(dir, environment); + nodePaths[dirIndex] = new NodePath(dir); localNodeId = possibleLockId; } catch (LockObtainFailedException ex) { logger.trace("failed to obtain node lock on {}", dir.toAbsolutePath()); @@ -225,7 +225,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl maybeLogPathDetails(); maybeLogHeapDetails(); - + applySegmentInfosTrace(settings); assertCanWrite(); success = true; @@ -250,7 +250,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl // We do some I/O in here, so skip this if DEBUG/INFO are not enabled: if (logger.isDebugEnabled()) { // Log one line per path.data: - StringBuilder sb = new StringBuilder("node data locations details:"); + StringBuilder sb = new StringBuilder(); for (NodePath nodePath : nodePaths) { sb.append('\n').append(" -> ").append(nodePath.path.toAbsolutePath()); @@ -278,7 +278,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl .append(fsPath.getType()) .append(']'); } - logger.debug(sb.toString()); + logger.debug("node data locations details:{}", sb); } else if (logger.isInfoEnabled()) { FsInfo.Path totFSPath = new FsInfo.Path(); Set allTypes = new HashSet<>(); @@ -306,14 +306,8 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl } // Just log a 1-line summary: - logger.info(String.format(Locale.ROOT, - "using [%d] data paths, mounts [%s], net usable_space [%s], net total_space [%s], spins? [%s], types [%s]", - nodePaths.length, - allMounts, - totFSPath.getAvailable(), - totFSPath.getTotal(), - toString(allSpins), - toString(allTypes))); + logger.info("using [{}] data paths, mounts [{}], net usable_space [{}], net total_space [{}], spins? 
[{}], types [{}]", + nodePaths.length, allMounts, totFSPath.getAvailable(), totFSPath.getTotal(), toString(allSpins), toString(allTypes)); } } @@ -452,11 +446,11 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl * @param indexSettings settings for the index being deleted */ public void deleteIndexDirectoryUnderLock(Index index, IndexSettings indexSettings) throws IOException { - final Path[] indexPaths = indexPaths(index.getName()); + final Path[] indexPaths = indexPaths(index); logger.trace("deleting index {} directory, paths({}): [{}]", index, indexPaths.length, indexPaths); IOUtils.rm(indexPaths); if (indexSettings.hasCustomDataPath()) { - Path customLocation = resolveCustomLocation(indexSettings, index.getName()); + Path customLocation = resolveIndexCustomLocation(indexSettings); logger.trace("deleting custom index {} directory [{}]", index, customLocation); IOUtils.rm(customLocation); } @@ -524,17 +518,16 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl */ public ShardLock shardLock(final ShardId shardId, long lockTimeoutMS) throws IOException { logger.trace("acquiring node shardlock on [{}], timeout [{}]", shardId, lockTimeoutMS); - final ShardLockKey shardLockKey = new ShardLockKey(shardId); final InternalShardLock shardLock; final boolean acquired; synchronized (shardLocks) { - if (shardLocks.containsKey(shardLockKey)) { - shardLock = shardLocks.get(shardLockKey); + if (shardLocks.containsKey(shardId)) { + shardLock = shardLocks.get(shardId); shardLock.incWaitCount(); acquired = false; } else { - shardLock = new InternalShardLock(shardLockKey); - shardLocks.put(shardLockKey, shardLock); + shardLock = new InternalShardLock(shardId); + shardLocks.put(shardId, shardLock); acquired = true; } } @@ -554,7 +547,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl @Override protected void closeInternal() { shardLock.release(); - logger.trace("released shard lock for [{}]", shardLockKey); + logger.trace("released shard lock for [{}]", shardId); } }; } @@ -566,51 +559,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl */ public Set lockedShards() { synchronized (shardLocks) { - Set lockedShards = shardLocks.keySet().stream() - .map(shardLockKey -> new ShardId(new Index(shardLockKey.indexName, "_na_"), shardLockKey.shardId)).collect(Collectors.toSet()); - return unmodifiableSet(lockedShards); - } - } - - // a key for the shard lock. we can't use shardIds, because the contain - // the index uuid, but we want the lock semantics to the same as we map indices to disk folders, i.e., without the uuid (for now). 
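With index folders keyed by UUID, the interim `ShardLockKey` (index name plus shard number) loses its reason to exist, and the hunk just below deletes it in favor of keying `shardLocks` by `ShardId` directly. A reduced sketch of the wait-count bookkeeping visible in `InternalShardLock`, with a hypothetical `ShardKey` record standing in for `ShardId`; this illustrates the pattern, it is not the actual class:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

// Reduced sketch of the lock registry above: one entry per shard, a wait count
// so the entry is removed only when the last waiter is done, and a semaphore
// providing the actual mutual exclusion.
public class ShardLockRegistry {
    record ShardKey(String indexUuid, int shardId) {}

    private static final class Entry {
        final Semaphore mutex = new Semaphore(1);
        int waitCount = 1; // guarded by the registry's monitor
    }

    private final Map<ShardKey, Entry> locks = new HashMap<>();

    public AutoCloseable lock(ShardKey key, long timeoutMillis) throws Exception {
        Entry entry;
        synchronized (locks) {
            entry = locks.get(key);
            if (entry == null) {
                entry = new Entry();
                locks.put(key, entry);
            } else {
                entry.waitCount++;
            }
        }
        final Entry acquired = entry;
        if (acquired.mutex.tryAcquire(timeoutMillis, TimeUnit.MILLISECONDS) == false) {
            decRef(key, acquired);
            throw new IllegalStateException("can't lock shard " + key + ", timed out");
        }
        return () -> {
            acquired.mutex.release();
            decRef(key, acquired);
        };
    }

    private void decRef(ShardKey key, Entry entry) {
        synchronized (locks) {
            if (--entry.waitCount == 0) {
                locks.remove(key); // last interested thread cleans up the entry
            }
        }
    }
}
```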
- private final class ShardLockKey { - final String indexName; - final int shardId; - - public ShardLockKey(final ShardId shardId) { - this.indexName = shardId.getIndexName(); - this.shardId = shardId.id(); - } - - @Override - public String toString() { - return "[" + indexName + "][" + shardId + "]"; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - ShardLockKey that = (ShardLockKey) o; - - if (shardId != that.shardId) { - return false; - } - return indexName.equals(that.indexName); - - } - - @Override - public int hashCode() { - int result = indexName.hashCode(); - result = 31 * result + shardId; - return result; + return unmodifiableSet(new HashSet<>(shardLocks.keySet())); } } @@ -623,10 +572,10 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl */ private final Semaphore mutex = new Semaphore(1); private int waitCount = 1; // guarded by shardLocks - private final ShardLockKey lockKey; + private final ShardId shardId; - InternalShardLock(ShardLockKey id) { - lockKey = id; + InternalShardLock(ShardId shardId) { + this.shardId = shardId; mutex.acquireUninterruptibly(); } @@ -646,10 +595,10 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl synchronized (shardLocks) { assert waitCount > 0 : "waitCount is " + waitCount + " but should be > 0"; --waitCount; - logger.trace("shard lock wait count for [{}] is now [{}]", lockKey, waitCount); + logger.trace("shard lock wait count for {} is now [{}]", shardId, waitCount); if (waitCount == 0) { - logger.trace("last shard lock wait decremented, removing lock for [{}]", lockKey); - InternalShardLock remove = shardLocks.remove(lockKey); + logger.trace("last shard lock wait decremented, removing lock for {}", shardId); + InternalShardLock remove = shardLocks.remove(shardId); assert remove != null : "Removed lock was null"; } } @@ -658,11 +607,11 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl void acquire(long timeoutInMillis) throws LockObtainFailedException{ try { if (mutex.tryAcquire(timeoutInMillis, TimeUnit.MILLISECONDS) == false) { - throw new LockObtainFailedException("Can't lock shard " + lockKey + ", timed out after " + timeoutInMillis + "ms"); + throw new LockObtainFailedException("Can't lock shard " + shardId + ", timed out after " + timeoutInMillis + "ms"); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); - throw new LockObtainFailedException("Can't lock shard " + lockKey + ", interrupted", e); + throw new LockObtainFailedException("Can't lock shard " + shardId + ", interrupted", e); } } } @@ -705,11 +654,11 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl /** * Returns all index paths. 
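`lockedShards()` now snapshots the lock map's keys under the monitor instead of rebuilding `ShardId`s from lock keys. The copy matters: `keySet()` is a live view backed by the map, so publishing it directly would expose concurrent mutation to callers. A short sketch of the idiom:

```java
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Why the hunk above copies the key set: handing out keySet() directly would
// let callers iterate while other threads mutate the map, risking
// ConcurrentModificationException or keys appearing and vanishing mid-read.
public class SnapshotKeys {
    private final Map<String, Object> locks = new HashMap<>();

    public Set<String> lockedShards() {
        synchronized (locks) {
            // copy under the monitor, then freeze the copy
            return Collections.unmodifiableSet(new HashSet<>(locks.keySet()));
        }
    }
}
```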
*/ - public Path[] indexPaths(String indexName) { + public Path[] indexPaths(Index index) { assert assertEnvIsLocked(); Path[] indexPaths = new Path[nodePaths.length]; for (int i = 0; i < nodePaths.length; i++) { - indexPaths[i] = nodePaths[i].indicesPath.resolve(indexName); + indexPaths[i] = nodePaths[i].resolve(index); } return indexPaths; } @@ -732,25 +681,47 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl return shardLocations; } - public Set findAllIndices() throws IOException { + /** + * Returns all folder names in ${data.paths}/nodes/{node.id}/indices folder + */ + public Set availableIndexFolders() throws IOException { if (nodePaths == null || locks == null) { throw new IllegalStateException("node is not configured to store local location"); } assert assertEnvIsLocked(); - Set indices = new HashSet<>(); + Set indexFolders = new HashSet<>(); for (NodePath nodePath : nodePaths) { Path indicesLocation = nodePath.indicesPath; if (Files.isDirectory(indicesLocation)) { try (DirectoryStream stream = Files.newDirectoryStream(indicesLocation)) { for (Path index : stream) { if (Files.isDirectory(index)) { - indices.add(index.getFileName().toString()); + indexFolders.add(index.getFileName().toString()); } } } } } - return indices; + return indexFolders; + + } + + /** + * Resolves all existing paths to indexFolderName in ${data.paths}/nodes/{node.id}/indices + */ + public Path[] resolveIndexFolder(String indexFolderName) throws IOException { + if (nodePaths == null || locks == null) { + throw new IllegalStateException("node is not configured to store local location"); + } + assert assertEnvIsLocked(); + List paths = new ArrayList<>(nodePaths.length); + for (NodePath nodePath : nodePaths) { + Path indexFolder = nodePath.indicesPath.resolve(indexFolderName); + if (Files.exists(indexFolder)) { + paths.add(indexFolder); + } + } + return paths.toArray(new Path[paths.size()]); } /** @@ -768,13 +739,13 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl } assert assertEnvIsLocked(); final Set shardIds = new HashSet<>(); - String indexName = index.getName(); + final String indexUniquePathId = index.getUUID(); for (final NodePath nodePath : nodePaths) { Path location = nodePath.indicesPath; if (Files.isDirectory(location)) { try (DirectoryStream indexStream = Files.newDirectoryStream(location)) { for (Path indexPath : indexStream) { - if (indexName.equals(indexPath.getFileName().toString())) { + if (indexUniquePathId.equals(indexPath.getFileName().toString())) { shardIds.addAll(findAllShardsForIndex(indexPath, index)); } } @@ -785,7 +756,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl } private static Set findAllShardsForIndex(Path indexPath, Index index) throws IOException { - assert indexPath.getFileName().toString().equals(index.getName()); + assert indexPath.getFileName().toString().equals(index.getUUID()); Set shardIds = new HashSet<>(); if (Files.isDirectory(indexPath)) { try (DirectoryStream stream = Files.newDirectoryStream(indexPath)) { @@ -868,7 +839,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl * * @param indexSettings settings for the index */ - private Path resolveCustomLocation(IndexSettings indexSettings) { + public Path resolveBaseCustomLocation(IndexSettings indexSettings) { String customDataDir = indexSettings.customDataPath(); if (customDataDir != null) { // This assert is because this should be caught by MetaDataCreateIndexService @@ 
-889,10 +860,9 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl * the root path for the index. * * @param indexSettings settings for the index - * @param indexName index to resolve the path for */ - private Path resolveCustomLocation(IndexSettings indexSettings, final String indexName) { - return resolveCustomLocation(indexSettings).resolve(indexName); + private Path resolveIndexCustomLocation(IndexSettings indexSettings) { + return resolveBaseCustomLocation(indexSettings).resolve(indexSettings.getUUID()); } /** @@ -904,7 +874,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl * @param shardId shard to resolve the path to */ public Path resolveCustomLocation(IndexSettings indexSettings, final ShardId shardId) { - return resolveCustomLocation(indexSettings, shardId.getIndexName()).resolve(Integer.toString(shardId.id())); + return resolveIndexCustomLocation(indexSettings).resolve(Integer.toString(shardId.id())); } /** @@ -928,22 +898,24 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl for (Path path : nodeDataPaths()) { // check node-paths are writable tryWriteTempFile(path); } - for (String index : this.findAllIndices()) { - for (Path path : this.indexPaths(index)) { // check index paths are writable - Path statePath = path.resolve(MetaDataStateFormat.STATE_DIR_NAME); - tryWriteTempFile(statePath); - tryWriteTempFile(path); - } - for (ShardId shardID : this.findAllShardIds(new Index(index, IndexMetaData.INDEX_UUID_NA_VALUE))) { - Path[] paths = this.availableShardPaths(shardID); - for (Path path : paths) { // check shard paths are writable - Path indexDir = path.resolve(ShardPath.INDEX_FOLDER_NAME); - Path statePath = path.resolve(MetaDataStateFormat.STATE_DIR_NAME); - Path translogDir = path.resolve(ShardPath.TRANSLOG_FOLDER_NAME); - tryWriteTempFile(indexDir); - tryWriteTempFile(translogDir); - tryWriteTempFile(statePath); - tryWriteTempFile(path); + for (String indexFolderName : this.availableIndexFolders()) { + for (Path indexPath : this.resolveIndexFolder(indexFolderName)) { // check index paths are writable + Path indexStatePath = indexPath.resolve(MetaDataStateFormat.STATE_DIR_NAME); + tryWriteTempFile(indexStatePath); + tryWriteTempFile(indexPath); + try (DirectoryStream stream = Files.newDirectoryStream(indexPath)) { + for (Path shardPath : stream) { + String fileName = shardPath.getFileName().toString(); + if (Files.isDirectory(shardPath) && fileName.chars().allMatch(Character::isDigit)) { + Path indexDir = shardPath.resolve(ShardPath.INDEX_FOLDER_NAME); + Path statePath = shardPath.resolve(MetaDataStateFormat.STATE_DIR_NAME); + Path translogDir = shardPath.resolve(ShardPath.TRANSLOG_FOLDER_NAME); + tryWriteTempFile(indexDir); + tryWriteTempFile(translogDir); + tryWriteTempFile(statePath); + tryWriteTempFile(shardPath); + } + } } } } diff --git a/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java b/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java index 757a78c3b5f..1ccdb43cc45 100644 --- a/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java +++ b/core/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java @@ -269,7 +269,7 @@ public abstract class AsyncShardFetch implements Rel */ // visible for testing void asyncFetch(final ShardId shardId, final String[] nodesIds, final MetaData metaData) { - IndexMetaData indexMetaData = metaData.index(shardId.getIndex()); + IndexMetaData indexMetaData = 
metaData.getIndexSafe(shardId.getIndex()); logger.trace("{} fetching [{}] from {}", shardId, type, nodesIds); action.list(shardId, indexMetaData, nodesIds, new ActionListener>() { @Override diff --git a/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java b/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java index e2fcb56b1e1..b4d8eeae532 100644 --- a/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java +++ b/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java @@ -19,6 +19,7 @@ package org.elasticsearch.gateway; +import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.component.AbstractComponent; @@ -26,12 +27,17 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.Index; +import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; @@ -47,7 +53,7 @@ public class DanglingIndicesState extends AbstractComponent { private final MetaStateService metaStateService; private final LocalAllocateDangledIndices allocateDangledIndices; - private final Map danglingIndices = ConcurrentCollections.newConcurrentMap(); + private final Map danglingIndices = ConcurrentCollections.newConcurrentMap(); @Inject public DanglingIndicesState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService, @@ -74,7 +80,7 @@ public class DanglingIndicesState extends AbstractComponent { /** * The current set of dangling indices. */ - Map getDanglingIndices() { + Map getDanglingIndices() { // This might be a good use case for CopyOnWriteHashMap return unmodifiableMap(new HashMap<>(danglingIndices)); } @@ -83,10 +89,16 @@ public class DanglingIndicesState extends AbstractComponent { * Cleans dangling indices if they are already allocated on the provided meta data. */ void cleanupAllocatedDangledIndices(MetaData metaData) { - for (String danglingIndex : danglingIndices.keySet()) { - if (metaData.hasIndex(danglingIndex)) { - logger.debug("[{}] no longer dangling (created), removing from dangling list", danglingIndex); - danglingIndices.remove(danglingIndex); + for (Index index : danglingIndices.keySet()) { + final IndexMetaData indexMetaData = metaData.index(index); + if (indexMetaData != null && indexMetaData.getIndex().getName().equals(index.getName())) { + if (indexMetaData.getIndex().getUUID().equals(index.getUUID()) == false) { + logger.warn("[{}] can not be imported as a dangling index, as there is already another index " + + "with the same name but a different uuid. local index will be ignored (but not deleted)", index); + } else { + logger.debug("[{}] no longer dangling (created), removing from dangling list", index); + } + danglingIndices.remove(index); } } } @@ -104,36 +116,30 @@ public class DanglingIndicesState extends AbstractComponent { * that have state on disk, but are not part of the provided meta data, or not detected * as dangled already. 
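`DanglingIndicesState` now tracks dangling indices by `Index`, that is, by name plus UUID, which lets `cleanupAllocatedDangledIndices` distinguish "this exact index was imported" from "an unrelated index reused the name". A sketch of that identity check, with a plain `Index` record standing in for Elasticsearch's class:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Sketch of the identity check in cleanupAllocatedDangledIndices above: a
// dangling entry is dropped once the cluster has an index with the same name,
// but it only counts as "the same index" when the UUID also matches.
public class DanglingCheckSketch {
    record Index(String name, String uuid) {}

    private final Map<Index, Object> dangling = new ConcurrentHashMap<>();

    void cleanup(Map<String, Index> clusterIndicesByName) {
        for (Index index : dangling.keySet()) {
            Index inCluster = clusterIndicesByName.get(index.name());
            if (inCluster == null) {
                continue; // still dangling, keep it
            }
            if (inCluster.uuid().equals(index.uuid()) == false) {
                System.out.println(index + " ignored: name taken by a different uuid");
            }
            dangling.remove(index); // imported, or shadowed by the newer index
        }
    }
}
```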
*/ - Map findNewDanglingIndices(MetaData metaData) { - final Set indices; + Map findNewDanglingIndices(MetaData metaData) { + final Set excludeIndexPathIds = new HashSet<>(metaData.indices().size() + danglingIndices.size()); + for (ObjectCursor cursor : metaData.indices().values()) { + excludeIndexPathIds.add(cursor.value.getIndex().getUUID()); + } + excludeIndexPathIds.addAll(danglingIndices.keySet().stream().map(Index::getUUID).collect(Collectors.toList())); try { - indices = nodeEnv.findAllIndices(); - } catch (Throwable e) { + final List indexMetaDataList = metaStateService.loadIndicesStates(excludeIndexPathIds::contains); + Map newIndices = new HashMap<>(indexMetaDataList.size()); + for (IndexMetaData indexMetaData : indexMetaDataList) { + if (metaData.hasIndex(indexMetaData.getIndex().getName())) { + logger.warn("[{}] can not be imported as a dangling index, as index with same name already exists in cluster metadata", + indexMetaData.getIndex()); + } else { + logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, auto import to cluster state", + indexMetaData.getIndex()); + newIndices.put(indexMetaData.getIndex(), indexMetaData); + } + } + return newIndices; + } catch (IOException e) { logger.warn("failed to list dangling indices", e); return emptyMap(); } - - Map newIndices = new HashMap<>(); - for (String indexName : indices) { - if (metaData.hasIndex(indexName) == false && danglingIndices.containsKey(indexName) == false) { - try { - IndexMetaData indexMetaData = metaStateService.loadIndexState(indexName); - if (indexMetaData != null) { - logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, auto import to cluster state", indexName); - if (!indexMetaData.getIndex().getName().equals(indexName)) { - logger.info("dangled index directory name is [{}], state name is [{}], renaming to directory name", indexName, indexMetaData.getIndex()); - indexMetaData = IndexMetaData.builder(indexMetaData).index(indexName).build(); - } - newIndices.put(indexName, indexMetaData); - } else { - logger.debug("[{}] dangling index directory detected, but no state found", indexName); - } - } catch (Throwable t) { - logger.warn("[{}] failed to load index state for detected dangled index", t, indexName); - } - } - } - return newIndices; } /** diff --git a/core/src/main/java/org/elasticsearch/gateway/Gateway.java b/core/src/main/java/org/elasticsearch/gateway/Gateway.java index fd3bd9a0b6d..b2cb2d11079 100644 --- a/core/src/main/java/org/elasticsearch/gateway/Gateway.java +++ b/core/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -20,22 +20,26 @@ package org.elasticsearch.gateway; import com.carrotsearch.hppc.ObjectFloatHashMap; -import com.carrotsearch.hppc.ObjectHashSet; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.lucene.util.IOUtils; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.Discovery; import 
org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.NodeServicesProvider; +import org.elasticsearch.indices.IndicesService; import java.nio.file.Path; +import java.util.Arrays; import java.util.function.Supplier; /** @@ -52,10 +56,15 @@ public class Gateway extends AbstractComponent implements ClusterStateListener { private final TransportNodesListGatewayMetaState listGatewayMetaState; private final Supplier minimumMasterNodesProvider; + private final IndicesService indicesService; + private final NodeServicesProvider nodeServicesProvider; public Gateway(Settings settings, ClusterService clusterService, NodeEnvironment nodeEnv, GatewayMetaState metaState, - TransportNodesListGatewayMetaState listGatewayMetaState, Discovery discovery) { + TransportNodesListGatewayMetaState listGatewayMetaState, Discovery discovery, + NodeServicesProvider nodeServicesProvider, IndicesService indicesService) { super(settings); + this.nodeServicesProvider = nodeServicesProvider; + this.indicesService = indicesService; this.clusterService = clusterService; this.nodeEnv = nodeEnv; this.metaState = metaState; @@ -65,9 +74,9 @@ public class Gateway extends AbstractComponent implements ClusterStateListener { } public void performStateRecovery(final GatewayStateRecoveredListener listener) throws GatewayException { - ObjectHashSet nodesIds = new ObjectHashSet<>(clusterService.state().nodes().masterNodes().keys()); - logger.trace("performing state recovery from {}", nodesIds); - TransportNodesListGatewayMetaState.NodesGatewayMetaState nodesState = listGatewayMetaState.list(nodesIds.toArray(String.class), null).actionGet(); + String[] nodesIds = clusterService.state().nodes().masterNodes().keys().toArray(String.class); + logger.trace("performing state recovery from {}", Arrays.toString(nodesIds)); + TransportNodesListGatewayMetaState.NodesGatewayMetaState nodesState = listGatewayMetaState.list(nodesIds, null).actionGet(); int requiredAllocation = Math.max(1, minimumMasterNodesProvider.get()); @@ -79,7 +88,7 @@ public class Gateway extends AbstractComponent implements ClusterStateListener { } } - ObjectFloatHashMap indices = new ObjectFloatHashMap<>(); + ObjectFloatHashMap indices = new ObjectFloatHashMap<>(); MetaData electedGlobalState = null; int found = 0; for (TransportNodesListGatewayMetaState.NodeGatewayMetaState nodeState : nodesState) { @@ -93,7 +102,7 @@ public class Gateway extends AbstractComponent implements ClusterStateListener { electedGlobalState = nodeState.metaData(); } for (ObjectCursor cursor : nodeState.metaData().indices().values()) { - indices.addTo(cursor.value.getIndex().getName(), 1); + indices.addTo(cursor.value.getIndex(), 1); } } if (found < requiredAllocation) { @@ -107,7 +116,7 @@ public class Gateway extends AbstractComponent implements ClusterStateListener { final Object[] keys = indices.keys; for (int i = 0; i < keys.length; i++) { if (keys[i] != null) { - String index = (String) keys[i]; + Index index = (Index) keys[i]; IndexMetaData electedIndexMetaData = null; int indexMetaDataCount = 0; for (TransportNodesListGatewayMetaState.NodeGatewayMetaState nodeState : nodesState) { @@ -128,11 +137,24 @@ public class Gateway extends AbstractComponent implements ClusterStateListener { if (electedIndexMetaData != null) { if (indexMetaDataCount < requiredAllocation) { logger.debug("[{}] found [{}], required [{}], not adding", index, indexMetaDataCount, requiredAllocation); + } // TODO if this logging statement is correct then we are 
missing an else here + try { + if (electedIndexMetaData.getState() == IndexMetaData.State.OPEN) { + // verify that we can actually create this index - if not we recover it as closed with lots of warn logs + indicesService.verifyIndexMetadata(nodeServicesProvider, electedIndexMetaData); + } + } catch (Exception e) { + logger.warn("recovering index {} failed - recovering as closed", e, electedIndexMetaData.getIndex()); + electedIndexMetaData = IndexMetaData.builder(electedIndexMetaData).state(IndexMetaData.State.CLOSE).build(); } + metaDataBuilder.put(electedIndexMetaData, false); } } } + final ClusterSettings clusterSettings = clusterService.getClusterSettings(); + metaDataBuilder.persistentSettings(clusterSettings.archiveUnknownOrBrokenSettings(metaDataBuilder.persistentSettings())); + metaDataBuilder.transientSettings(clusterSettings.archiveUnknownOrBrokenSettings(metaDataBuilder.transientSettings())); ClusterState.Builder builder = ClusterState.builder(clusterService.state().getClusterName()); builder.metaData(metaDataBuilder); listener.onSuccess(builder.build()); diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index acd650bc6f7..0059a0ef61b 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -22,7 +22,6 @@ package org.elasticsearch.gateway; import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.action.support.nodes.BaseNodesResponse; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNodes; @@ -31,6 +30,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lease.Releasables; diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index e90cb750cf5..867b7420107 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -34,7 +34,9 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.IndexFolderUpgrader; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.Index; import java.nio.file.DirectoryStream; import java.nio.file.Files; @@ -61,7 +63,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL @Nullable private volatile MetaData previousMetaData; - private volatile Set previouslyWrittenIndices = emptySet(); + private volatile Set previouslyWrittenIndices = emptySet(); @Inject public GatewayMetaState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService, @@ -84,7 +86,8 @@ 
public class GatewayMetaState extends AbstractComponent implements ClusterStateL if (DiscoveryNode.masterNode(settings) || DiscoveryNode.dataNode(settings)) { try { ensureNoPre019State(); - pre20Upgrade(); + IndexFolderUpgrader.upgradeIndicesIfNeeded(settings, nodeEnv); + upgradeMetaData(); long startNS = System.nanoTime(); metaStateService.loadFullState(); logger.debug("took {} to load state", TimeValue.timeValueMillis(TimeValue.nsecToMSec(System.nanoTime() - startNS))); @@ -102,7 +105,6 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL @Override public void clusterChanged(ClusterChangedEvent event) { - Set relevantIndices = new HashSet<>(); final ClusterState state = event.state(); if (state.blocks().disableStatePersistence()) { // reset the current metadata, we need to start fresh... @@ -113,7 +115,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL MetaData newMetaData = state.metaData(); // we don't check if metaData changed, since we might be called several times and we need to check dangling... - + Set relevantIndices = Collections.emptySet(); boolean success = true; // write the state if this node is a master eligible node or if it is a data node and has shards allocated on it if (state.nodes().localNode().masterNode() || state.nodes().localNode().dataNode()) { @@ -126,14 +128,14 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL // persistence was disabled or the node was restarted), see getRelevantIndicesOnDataOnlyNode(). // we therefore have to check here if we have shards on disk and add their indices to the previouslyWrittenIndices list if (isDataOnlyNode(state)) { - Set newPreviouslyWrittenIndices = new HashSet<>(previouslyWrittenIndices.size()); + Set newPreviouslyWrittenIndices = new HashSet<>(previouslyWrittenIndices.size()); for (IndexMetaData indexMetaData : newMetaData) { IndexMetaData indexMetaDataOnDisk = null; if (indexMetaData.getState().equals(IndexMetaData.State.CLOSE)) { - indexMetaDataOnDisk = metaStateService.loadIndexState(indexMetaData.getIndex().getName()); + indexMetaDataOnDisk = metaStateService.loadIndexState(indexMetaData.getIndex()); } if (indexMetaDataOnDisk != null) { - newPreviouslyWrittenIndices.add(indexMetaDataOnDisk.getIndex().getName()); + newPreviouslyWrittenIndices.add(indexMetaDataOnDisk.getIndex()); } } newPreviouslyWrittenIndices.addAll(previouslyWrittenIndices); @@ -152,13 +154,13 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL } } - Iterable writeInfo; + relevantIndices = getRelevantIndices(event.state(), event.previousState(), previouslyWrittenIndices); - writeInfo = resolveStatesToBeWritten(previouslyWrittenIndices, relevantIndices, previousMetaData, event.state().metaData()); + final Iterable writeInfo = resolveStatesToBeWritten(previouslyWrittenIndices, relevantIndices, previousMetaData, event.state().metaData()); // check and write changes in indices for (IndexMetaWriteInfo indexMetaWrite : writeInfo) { try { - metaStateService.writeIndex(indexMetaWrite.reason, indexMetaWrite.newMetaData, indexMetaWrite.previousMetaData); + metaStateService.writeIndex(indexMetaWrite.reason, indexMetaWrite.newMetaData); } catch (Throwable e) { success = false; } @@ -166,15 +168,14 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL } danglingIndicesState.processDanglingIndices(newMetaData); - if (success) { previousMetaData = newMetaData; previouslyWrittenIndices = 
unmodifiableSet(relevantIndices); } } - public static Set getRelevantIndices(ClusterState state, ClusterState previousState, Set previouslyWrittenIndices) { - Set relevantIndices; + public static Set getRelevantIndices(ClusterState state, ClusterState previousState, Set previouslyWrittenIndices) { + Set relevantIndices; if (isDataOnlyNode(state)) { relevantIndices = getRelevantIndicesOnDataOnlyNode(state, previousState, previouslyWrittenIndices); } else if (state.nodes().localNode().masterNode() == true) { @@ -202,7 +203,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL try (DirectoryStream stream = Files.newDirectoryStream(stateLocation)) { for (Path stateFile : stream) { if (logger.isTraceEnabled()) { - logger.trace("[upgrade]: processing [" + stateFile.getFileName() + "]"); + logger.trace("[upgrade]: processing [{}]", stateFile.getFileName()); } final String name = stateFile.getFileName().toString(); if (name.startsWith("metadata-")) { @@ -221,7 +222,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL * MetaDataIndexUpgradeService might also update obsolete settings if needed. When this happens we rewrite * index metadata with new settings. */ - private void pre20Upgrade() throws Exception { + private void upgradeMetaData() throws Exception { MetaData metaData = loadMetaState(); List updateIndexMetaData = new ArrayList<>(); for (IndexMetaData indexMetaData : metaData) { @@ -233,7 +234,8 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL // We successfully checked all indices for backward compatibility and found no non-upgradable indices, which // means the upgrade can continue. Now it's safe to overwrite index metadata with the new version. for (IndexMetaData indexMetaData : updateIndexMetaData) { - metaStateService.writeIndex("upgrade", indexMetaData, null); + // since we upgraded the index folders already, write index state in the upgraded index folder + metaStateService.writeIndex("upgrade", indexMetaData); } } @@ -264,10 +266,10 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL * @param newMetaData The new metadata * @return iterable over all indices states that should be written to disk */ - public static Iterable resolveStatesToBeWritten(Set previouslyWrittenIndices, Set potentiallyUnwrittenIndices, MetaData previousMetaData, MetaData newMetaData) { + public static Iterable resolveStatesToBeWritten(Set previouslyWrittenIndices, Set potentiallyUnwrittenIndices, MetaData previousMetaData, MetaData newMetaData) { List indicesToWrite = new ArrayList<>(); - for (String index : potentiallyUnwrittenIndices) { - IndexMetaData newIndexMetaData = newMetaData.index(index); + for (Index index : potentiallyUnwrittenIndices) { + IndexMetaData newIndexMetaData = newMetaData.getIndexSafe(index); IndexMetaData previousIndexMetaData = previousMetaData == null ? 
null : previousMetaData.index(index); String writeReason = null; if (previouslyWrittenIndices.contains(index) == false || previousIndexMetaData == null) { @@ -282,14 +284,14 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL return indicesToWrite; } - public static Set getRelevantIndicesOnDataOnlyNode(ClusterState state, ClusterState previousState, Set previouslyWrittenIndices) { + public static Set getRelevantIndicesOnDataOnlyNode(ClusterState state, ClusterState previousState, Set previouslyWrittenIndices) { RoutingNode newRoutingNode = state.getRoutingNodes().node(state.nodes().localNodeId()); if (newRoutingNode == null) { throw new IllegalStateException("cluster state does not contain this node - cannot write index meta state"); } - Set indices = new HashSet<>(); + Set indices = new HashSet<>(); for (ShardRouting routing : newRoutingNode) { - indices.add(routing.index().getName()); + indices.add(routing.index()); } // we have to check the meta data also: closed indices will not appear in the routing table, but we must still write the state if we have it written on disk previously for (IndexMetaData indexMetaData : state.metaData()) { @@ -300,19 +302,19 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL if (previousMetaData != null) { isOrWasClosed = isOrWasClosed || previousMetaData.getState().equals(IndexMetaData.State.CLOSE); } - if (previouslyWrittenIndices.contains(indexMetaData.getIndex().getName()) && isOrWasClosed) { - indices.add(indexMetaData.getIndex().getName()); + if (previouslyWrittenIndices.contains(indexMetaData.getIndex()) && isOrWasClosed) { + indices.add(indexMetaData.getIndex()); } } return indices; } - public static Set getRelevantIndicesForMasterEligibleNode(ClusterState state) { - Set relevantIndices; + public static Set getRelevantIndicesForMasterEligibleNode(ClusterState state) { + Set relevantIndices; relevantIndices = new HashSet<>(); // we have to iterate over the metadata to make sure we also capture closed indices for (IndexMetaData indexMetaData : state.metaData()) { - relevantIndices.add(indexMetaData.getIndex().getName()); + relevantIndices.add(indexMetaData.getIndex()); } return relevantIndices; } diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java index 384539b4c63..7dcc45f1c0a 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -21,7 +21,6 @@ package org.elasticsearch.gateway; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; @@ -34,14 +33,18 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import 
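With previouslyWrittenIndices now keyed by Index instead of String, resolveStatesToBeWritten still implements the same decision: write an index state when this node never wrote it before (or has no previous metadata for it), or when the metadata changed since the last successful write. A simplified sketch of that decision under assumed types, with plain strings and a version number standing in for Index and IndexMetaData:

    // Simplified sketch of the write decision above, with plain strings and a
    // version number standing in for Index and IndexMetaData.
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    final class StateWriteSketch {
        static List<String> statesToWrite(Set<String> previouslyWritten, Set<String> relevant,
                                          Map<String, Long> previousVersions, Map<String, Long> currentVersions) {
            List<String> toWrite = new ArrayList<>();
            for (String index : relevant) {
                Long before = previousVersions.get(index);
                Long now = currentVersions.get(index);
                if (previouslyWritten.contains(index) == false || before == null) {
                    toWrite.add(index);      // never written by this node: write it
                } else if (before.equals(now) == false) {
                    toWrite.add(index);      // metadata version moved: rewrite it
                }
            }
            return toWrite;
        }
    }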
org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.NodeServicesProvider; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; @@ -52,20 +55,20 @@ import java.util.concurrent.atomic.AtomicBoolean; */ public class GatewayService extends AbstractLifecycleComponent implements ClusterStateListener { - public static final Setting EXPECTED_NODES_SETTING = Setting.intSetting( - "gateway.expected_nodes", -1, -1, false, Setting.Scope.CLUSTER); - public static final Setting EXPECTED_DATA_NODES_SETTING = Setting.intSetting( - "gateway.expected_data_nodes", -1, -1, false, Setting.Scope.CLUSTER); - public static final Setting EXPECTED_MASTER_NODES_SETTING = Setting.intSetting( - "gateway.expected_master_nodes", -1, -1, false, Setting.Scope.CLUSTER); - public static final Setting RECOVER_AFTER_TIME_SETTING = Setting.positiveTimeSetting( - "gateway.recover_after_time", TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER); - public static final Setting RECOVER_AFTER_NODES_SETTING = Setting.intSetting( - "gateway.recover_after_nodes", -1, -1, false, Setting.Scope.CLUSTER); - public static final Setting RECOVER_AFTER_DATA_NODES_SETTING = Setting.intSetting( - "gateway.recover_after_data_nodes", -1, -1, false, Setting.Scope.CLUSTER); - public static final Setting RECOVER_AFTER_MASTER_NODES_SETTING = Setting.intSetting( - "gateway.recover_after_master_nodes", 0, 0, false, Setting.Scope.CLUSTER); + public static final Setting EXPECTED_NODES_SETTING = + Setting.intSetting("gateway.expected_nodes", -1, -1, Property.NodeScope); + public static final Setting EXPECTED_DATA_NODES_SETTING = + Setting.intSetting("gateway.expected_data_nodes", -1, -1, Property.NodeScope); + public static final Setting EXPECTED_MASTER_NODES_SETTING = + Setting.intSetting("gateway.expected_master_nodes", -1, -1, Property.NodeScope); + public static final Setting RECOVER_AFTER_TIME_SETTING = + Setting.positiveTimeSetting("gateway.recover_after_time", TimeValue.timeValueMillis(0), Property.NodeScope); + public static final Setting RECOVER_AFTER_NODES_SETTING = + Setting.intSetting("gateway.recover_after_nodes", -1, -1, Property.NodeScope); + public static final Setting RECOVER_AFTER_DATA_NODES_SETTING = + Setting.intSetting("gateway.recover_after_data_nodes", -1, -1, Property.NodeScope); + public static final Setting RECOVER_AFTER_MASTER_NODES_SETTING = + Setting.intSetting("gateway.recover_after_master_nodes", 0, 0, Property.NodeScope); public static final ClusterBlock STATE_NOT_RECOVERED_BLOCK = new ClusterBlock(1, "state not recovered / initialized", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL); @@ -94,9 +97,11 @@ public class GatewayService extends AbstractLifecycleComponent i @Inject public GatewayService(Settings settings, AllocationService allocationService, ClusterService clusterService, ThreadPool threadPool, NodeEnvironment nodeEnvironment, GatewayMetaState metaState, - TransportNodesListGatewayMetaState listGatewayMetaState, Discovery discovery) { + TransportNodesListGatewayMetaState listGatewayMetaState, Discovery discovery, + NodeServicesProvider nodeServicesProvider, IndicesService indicesService) { super(settings); - this.gateway = new Gateway(settings, clusterService, nodeEnvironment, metaState, listGatewayMetaState, discovery); + 
this.gateway = new Gateway(settings, clusterService, nodeEnvironment, metaState, listGatewayMetaState, discovery, + nodeServicesProvider, indicesService); this.allocationService = allocationService; this.clusterService = clusterService; this.threadPool = threadPool; @@ -161,11 +166,14 @@ public class GatewayService extends AbstractLifecycleComponent i if (state.nodes().masterNodeId() == null) { logger.debug("not recovering from gateway, no master elected yet"); } else if (recoverAfterNodes != -1 && (nodes.masterAndDataNodes().size()) < recoverAfterNodes) { - logger.debug("not recovering from gateway, nodes_size (data+master) [" + nodes.masterAndDataNodes().size() + "] < recover_after_nodes [" + recoverAfterNodes + "]"); + logger.debug("not recovering from gateway, nodes_size (data+master) [{}] < recover_after_nodes [{}]", + nodes.masterAndDataNodes().size(), recoverAfterNodes); } else if (recoverAfterDataNodes != -1 && nodes.dataNodes().size() < recoverAfterDataNodes) { - logger.debug("not recovering from gateway, nodes_size (data) [" + nodes.dataNodes().size() + "] < recover_after_data_nodes [" + recoverAfterDataNodes + "]"); + logger.debug("not recovering from gateway, nodes_size (data) [{}] < recover_after_data_nodes [{}]", + nodes.dataNodes().size(), recoverAfterDataNodes); } else if (recoverAfterMasterNodes != -1 && nodes.masterNodes().size() < recoverAfterMasterNodes) { - logger.debug("not recovering from gateway, nodes_size (master) [" + nodes.masterNodes().size() + "] < recover_after_master_nodes [" + recoverAfterMasterNodes + "]"); + logger.debug("not recovering from gateway, nodes_size (master) [{}] < recover_after_master_nodes [{}]", + nodes.masterNodes().size(), recoverAfterMasterNodes); } else { boolean enforceRecoverAfterTime; String reason; diff --git a/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index 041b8cafecc..b14dcc6d1a4 100644 --- a/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/core/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -19,7 +19,6 @@ package org.elasticsearch.gateway; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -30,6 +29,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java index 9ef09753c43..89192c47d09 100644 --- a/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -25,58 +25,26 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.ToXContent; -import 
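The reworked debug messages above spell out the gating rule for the recover_after settings: recovery is deferred while any configured threshold is unmet, with -1 meaning "not configured". A self-contained sketch of that rule (a hypothetical helper, not the GatewayService API):

    // Hypothetical sketch of the recovery gating above: -1 means a threshold is
    // not configured; recovery waits until every configured threshold is met.
    final class RecoverAfterSketch {
        static boolean thresholdsMet(int masterAndDataNodes, int dataNodes, int masterNodes,
                                     int recoverAfterNodes, int recoverAfterDataNodes, int recoverAfterMasterNodes) {
            if (recoverAfterNodes != -1 && masterAndDataNodes < recoverAfterNodes) return false;
            if (recoverAfterDataNodes != -1 && dataNodes < recoverAfterDataNodes) return false;
            if (recoverAfterMasterNodes != -1 && masterNodes < recoverAfterMasterNodes) return false;
            return true;
        }
    }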
org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import java.io.IOException; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Predicate; /** * Handles writing and loading both {@link MetaData} and {@link IndexMetaData} */ public class MetaStateService extends AbstractComponent { - static final String FORMAT_SETTING = "gateway.format"; - - static final String GLOBAL_STATE_FILE_PREFIX = "global-"; - private static final String INDEX_STATE_FILE_PREFIX = "state-"; - private final NodeEnvironment nodeEnv; - private final XContentType format; - private final ToXContent.Params formatParams; - private final ToXContent.Params gatewayModeFormatParams; - private final MetaDataStateFormat indexStateFormat; - private final MetaDataStateFormat globalStateFormat; - @Inject public MetaStateService(Settings settings, NodeEnvironment nodeEnv) { super(settings); this.nodeEnv = nodeEnv; - this.format = XContentType.fromMediaTypeOrFormat(settings.get(FORMAT_SETTING, "smile")); - if (this.format == XContentType.SMILE) { - Map params = new HashMap<>(); - params.put("binary", "true"); - formatParams = new ToXContent.MapParams(params); - Map gatewayModeParams = new HashMap<>(); - gatewayModeParams.put("binary", "true"); - gatewayModeParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_GATEWAY); - gatewayModeFormatParams = new ToXContent.MapParams(gatewayModeParams); - } else { - formatParams = ToXContent.EMPTY_PARAMS; - Map gatewayModeParams = new HashMap<>(); - gatewayModeParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_GATEWAY); - gatewayModeFormatParams = new ToXContent.MapParams(gatewayModeParams); - } - indexStateFormat = indexStateFormat(format, formatParams); - globalStateFormat = globalStateFormat(format, gatewayModeFormatParams); - } /** @@ -91,14 +59,12 @@ public class MetaStateService extends AbstractComponent { } else { metaDataBuilder = MetaData.builder(); } - - final Set indices = nodeEnv.findAllIndices(); - for (String index : indices) { - IndexMetaData indexMetaData = loadIndexState(index); - if (indexMetaData == null) { - logger.debug("[{}] failed to find metadata for existing index location", index); - } else { + for (String indexFolderName : nodeEnv.availableIndexFolders()) { + IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, nodeEnv.resolveIndexFolder(indexFolderName)); + if (indexMetaData != null) { metaDataBuilder.put(indexMetaData, false); + } else { + logger.debug("[{}] failed to find metadata for existing index location", indexFolderName); } } return metaDataBuilder.build(); @@ -108,15 +74,40 @@ public class MetaStateService extends AbstractComponent { * Loads the index state for the provided index name, returning null if it doesn't exist.
*/ @Nullable - IndexMetaData loadIndexState(String index) throws IOException { - return indexStateFormat.loadLatestState(logger, nodeEnv.indexPaths(index)); + IndexMetaData loadIndexState(Index index) throws IOException { + return IndexMetaData.FORMAT.loadLatestState(logger, nodeEnv.indexPaths(index)); + } + + /** + * Loads all indices states available on disk + */ + List loadIndicesStates(Predicate excludeIndexPathIdsPredicate) throws IOException { + List indexMetaDataList = new ArrayList<>(); + for (String indexFolderName : nodeEnv.availableIndexFolders()) { + if (excludeIndexPathIdsPredicate.test(indexFolderName)) { + continue; + } + IndexMetaData indexMetaData = IndexMetaData.FORMAT.loadLatestState(logger, + nodeEnv.resolveIndexFolder(indexFolderName)); + if (indexMetaData != null) { + final String indexPathId = indexMetaData.getIndex().getUUID(); + if (indexFolderName.equals(indexPathId)) { + indexMetaDataList.add(indexMetaData); + } else { + throw new IllegalStateException("[" + indexFolderName+ "] invalid index folder name, rename to [" + indexPathId + "]"); + } + } else { + logger.debug("[{}] failed to find metadata for existing index location", indexFolderName); + } + } + return indexMetaDataList; } /** * Loads the global state, *without* index state, see {@link #loadFullState()} for that. */ MetaData loadGlobalState() throws IOException { - MetaData globalState = globalStateFormat.loadLatestState(logger, nodeEnv.nodeDataPaths()); + MetaData globalState = MetaData.FORMAT.loadLatestState(logger, nodeEnv.nodeDataPaths()); // ES 2.0 now requires units for all time and byte-sized settings, so we add the default unit if it's missing // TODO: can we somehow only do this for pre-2.0 cluster state? if (globalState != null) { @@ -129,13 +120,15 @@ public class MetaStateService extends AbstractComponent { /** * Writes the index state. 
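The new loadIndicesStates method above does two things at once: it filters folders through a caller-supplied predicate, and it enforces that every index folder is named after the uuid recorded in its own metadata. A sketch of the same invariant with hypothetical inputs (a folder-to-uuid map standing in for on-disk state):

    // Hypothetical sketch of the invariant enforced above: an index folder must
    // be named after the uuid recorded in the metadata it contains.
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.function.Predicate;

    final class FolderScanSketch {
        static List<String> loadUuids(Map<String, String> folderToUuid, Predicate<String> exclude) {
            List<String> loaded = new ArrayList<>();
            for (Map.Entry<String, String> e : folderToUuid.entrySet()) {
                String folder = e.getKey();
                if (exclude.test(folder)) {
                    continue;                 // e.g. skip folders a dangling-indices pass owns
                }
                String uuid = e.getValue();
                if (uuid == null) {
                    continue;                 // no metadata on disk; the real code logs and moves on
                }
                if (folder.equals(uuid) == false) {
                    throw new IllegalStateException("[" + folder + "] invalid index folder name, rename to [" + uuid + "]");
                }
                loaded.add(uuid);
            }
            return loaded;
        }
    }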
*/ - void writeIndex(String reason, IndexMetaData indexMetaData, @Nullable IndexMetaData previousIndexMetaData) throws Exception { - logger.trace("[{}] writing state, reason [{}]", indexMetaData.getIndex(), reason); + void writeIndex(String reason, IndexMetaData indexMetaData) throws IOException { + final Index index = indexMetaData.getIndex(); + logger.trace("[{}] writing state, reason [{}]", index, reason); try { - indexStateFormat.write(indexMetaData, indexMetaData.getVersion(), nodeEnv.indexPaths(indexMetaData.getIndex().getName())); + IndexMetaData.FORMAT.write(indexMetaData, indexMetaData.getVersion(), + nodeEnv.indexPaths(indexMetaData.getIndex())); } catch (Throwable ex) { - logger.warn("[{}]: failed to write index state", ex, indexMetaData.getIndex()); - throw new IOException("failed to write state for [" + indexMetaData.getIndex() + "]", ex); + logger.warn("[{}]: failed to write index state", ex, index); + throw new IOException("failed to write state for [" + index + "]", ex); } } @@ -145,45 +138,10 @@ public class MetaStateService extends AbstractComponent { void writeGlobalState(String reason, MetaData metaData) throws Exception { logger.trace("[_global] writing state, reason [{}]", reason); try { - globalStateFormat.write(metaData, metaData.version(), nodeEnv.nodeDataPaths()); + MetaData.FORMAT.write(metaData, metaData.version(), nodeEnv.nodeDataPaths()); } catch (Throwable ex) { logger.warn("[_global]: failed to write global state", ex); throw new IOException("failed to write global state", ex); } } - - /** - * Returns a StateFormat that can read and write {@link MetaData} - */ - static MetaDataStateFormat globalStateFormat(XContentType format, final ToXContent.Params formatParams) { - return new MetaDataStateFormat(format, GLOBAL_STATE_FILE_PREFIX) { - - @Override - public void toXContent(XContentBuilder builder, MetaData state) throws IOException { - MetaData.Builder.toXContent(state, builder, formatParams); - } - - @Override - public MetaData fromXContent(XContentParser parser) throws IOException { - return MetaData.Builder.fromXContent(parser); - } - }; - } - - /** - * Returns a StateFormat that can read and write {@link IndexMetaData} - */ - static MetaDataStateFormat indexStateFormat(XContentType format, final ToXContent.Params formatParams) { - return new MetaDataStateFormat(format, INDEX_STATE_FILE_PREFIX) { - - @Override - public void toXContent(XContentBuilder builder, IndexMetaData state) throws IOException { - IndexMetaData.Builder.toXContent(state, builder, formatParams); } - - @Override - public IndexMetaData fromXContent(XContentParser parser) throws IOException { - return IndexMetaData.Builder.fromXContent(parser); - } - }; - } } diff --git a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index ed61aa2c1fd..e7447e301c4 100644 --- a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java @@ -31,6 +31,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards; import 
org.elasticsearch.index.shard.ShardStateMetaData; @@ -67,9 +68,13 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { } }; - public static final Setting NODE_INITIAL_SHARDS_SETTING = new Setting<>("gateway.initial_shards", (settings) -> settings.get("gateway.local.initial_shards", "quorum"), INITIAL_SHARDS_PARSER, true, Setting.Scope.CLUSTER); + public static final Setting NODE_INITIAL_SHARDS_SETTING = + new Setting<>("gateway.initial_shards", (settings) -> settings.get("gateway.local.initial_shards", "quorum"), INITIAL_SHARDS_PARSER, + Property.Dynamic, Property.NodeScope); @Deprecated - public static final Setting INDEX_RECOVERY_INITIAL_SHARDS_SETTING = new Setting<>("index.recovery.initial_shards", (settings) -> NODE_INITIAL_SHARDS_SETTING.get(settings) , INITIAL_SHARDS_PARSER, true, Setting.Scope.INDEX); + public static final Setting INDEX_RECOVERY_INITIAL_SHARDS_SETTING = + new Setting<>("index.recovery.initial_shards", (settings) -> NODE_INITIAL_SHARDS_SETTING.get(settings) , INITIAL_SHARDS_PARSER, + Property.Dynamic, Property.IndexScope); public PrimaryShardAllocator(Settings settings) { super(settings); @@ -89,7 +94,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { continue; } - final IndexMetaData indexMetaData = metaData.index(shard.getIndexName()); + final IndexMetaData indexMetaData = metaData.getIndexSafe(shard.index()); // don't go wild here and create a new IndexSetting object for every shard this could cause a lot of garbage // on cluster restart if we allocate a boat load of shards if (shard.allocatedPostIndexCreate(indexMetaData) == false) { @@ -113,7 +118,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { final boolean enoughAllocationsFound; if (lastActiveAllocationIds.isEmpty()) { - assert Version.indexCreated(indexMetaData.getSettings()).before(Version.V_5_0_0) : "trying to allocated a primary with an empty allocation id set, but index is new"; + assert Version.indexCreated(indexMetaData.getSettings()).before(Version.V_5_0_0_alpha1) : "trying to allocate a primary with an empty allocation id set, but index is new"; // when we load an old index (after upgrading cluster) or restore a snapshot of an old index // fall back to old version-based allocation mode // Note that once the shard has been active, lastActiveAllocationIds will be non-empty @@ -123,7 +128,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { } else { enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(indexMetaData, nodeShardsResult); } - logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", shard.index(), shard.id(), Version.V_5_0_0, nodeShardsResult.allocationsFound, shard); + logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", shard.index(), shard.id(), Version.V_5_0_0_alpha1, nodeShardsResult.allocationsFound, shard); } else { assert lastActiveAllocationIds.isEmpty() == false; // use allocation ids to select nodes diff --git a/core/src/main/java/org/elasticsearch/gateway/PriorityComparator.java b/core/src/main/java/org/elasticsearch/gateway/PriorityComparator.java index 04f438c70fe..1d24baf561a 100644 --- a/core/src/main/java/org/elasticsearch/gateway/PriorityComparator.java +++ b/core/src/main/java/org/elasticsearch/gateway/PriorityComparator.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; import
org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; import java.util.Comparator; @@ -42,8 +43,8 @@ public abstract class PriorityComparator implements Comparator { final String o2Index = o2.getIndexName(); int cmp = 0; if (o1Index.equals(o2Index) == false) { - final Settings settingsO1 = getIndexSettings(o1Index); - final Settings settingsO2 = getIndexSettings(o2Index); + final Settings settingsO1 = getIndexSettings(o1.index()); + final Settings settingsO2 = getIndexSettings(o2.index()); cmp = Long.compare(priority(settingsO2), priority(settingsO1)); if (cmp == 0) { cmp = Long.compare(timeCreated(settingsO2), timeCreated(settingsO1)); @@ -63,7 +64,7 @@ public abstract class PriorityComparator implements Comparator { return settings.getAsLong(IndexMetaData.SETTING_CREATION_DATE, -1L); } - protected abstract Settings getIndexSettings(String index); + protected abstract Settings getIndexSettings(Index index); /** * Returns a PriorityComparator that uses the RoutingAllocation index metadata to access the index setting per index. @@ -71,8 +72,8 @@ public abstract class PriorityComparator implements Comparator { public static PriorityComparator getAllocationComparator(final RoutingAllocation allocation) { return new PriorityComparator() { @Override - protected Settings getIndexSettings(String index) { - IndexMetaData indexMetaData = allocation.metaData().index(index); + protected Settings getIndexSettings(Index index) { + IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(index); return indexMetaData.getSettings(); } }; diff --git a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java index e2b6f0d27ed..74511639d47 100644 --- a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java @@ -74,7 +74,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { } // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... - IndexMetaData indexMetaData = metaData.index(shard.getIndexName()); + IndexMetaData indexMetaData = metaData.getIndexSafe(shard.index()); if (shard.allocatedPostIndexCreate(indexMetaData) == false) { continue; } @@ -104,6 +104,8 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { && matchingNodes.isNodeMatchBySyncID(nodeWithHighestMatch) == true) { // we found a better match that has a full sync id match, the existing allocation is not fully synced // so we found a better one, cancel this one + logger.debug("cancelling allocation of replica on [{}], sync id match found on node [{}]", + currentNode, nodeWithHighestMatch); it.moveToUnassigned(new UnassignedInfo(UnassignedInfo.Reason.REALLOCATED_REPLICA, "existing allocation of replica to [" + currentNode + "] cancelled, sync id match found on node [" + nodeWithHighestMatch + "]", null, allocation.getCurrentNanoTime(), System.currentTimeMillis())); @@ -127,7 +129,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { } // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... 
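PriorityComparator now resolves settings through the Index object rather than the bare name, but the ordering itself is unchanged: higher index.priority first, ties broken by newer creation date. A standalone comparator illustrating that ordering (IndexInfo is a hypothetical value class):

    // Standalone version of the ordering above (IndexInfo is hypothetical):
    // higher priority first, ties broken by newer creation date.
    import java.util.Comparator;

    final class PrioritySketch {
        static final class IndexInfo {
            final long priority;
            final long creationDate;
            IndexInfo(long priority, long creationDate) { this.priority = priority; this.creationDate = creationDate; }
        }

        static final Comparator<IndexInfo> RECOVERY_ORDER =
            Comparator.<IndexInfo>comparingLong(i -> i.priority).reversed()
                .thenComparing(Comparator.<IndexInfo>comparingLong(i -> i.creationDate).reversed());
    }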
- IndexMetaData indexMetaData = metaData.index(shard.getIndexName()); + IndexMetaData indexMetaData = metaData.getIndexSafe(shard.index()); if (shard.allocatedPostIndexCreate(indexMetaData) == false) { continue; } diff --git a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java index fb174f4bd45..0fd1fd35809 100644 --- a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java +++ b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java @@ -29,10 +29,10 @@ import org.elasticsearch.action.support.nodes.BaseNodesRequest; import org.elasticsearch.action.support.nodes.BaseNodesResponse; import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java index 03f8dc81703..7a090208818 100644 --- a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java @@ -29,11 +29,11 @@ import org.elasticsearch.action.support.nodes.BaseNodesRequest; import org.elasticsearch.action.support.nodes.BaseNodesResponse; import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -125,7 +125,7 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction logger.trace("{} loading local shard state info", shardId); ShardStateMetaData shardStateMetaData = ShardStateMetaData.FORMAT.loadLatestState(logger, nodeEnv.availableShardPaths(request.shardId)); if (shardStateMetaData != null) { - final IndexMetaData metaData = clusterService.state().metaData().index(shardId.getIndexName()); // it's a mystery why this is sometimes null + final IndexMetaData metaData = clusterService.state().metaData().index(shardId.getIndex()); // it's a mystery why this is sometimes null if (metaData != null) { ShardPath shardPath = null; try { diff --git a/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java b/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java index 6c91df079b9..48af1c83965 100644 --- a/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java +++ b/core/src/main/java/org/elasticsearch/http/HttpTransportSettings.java @@ -20,42 +20,64 @@ package org.elasticsearch.http; 
import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Scope; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.transport.PortsRange; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import java.util.List; +import java.util.function.Function; import static java.util.Collections.emptyList; import static org.elasticsearch.common.settings.Setting.listSetting; public final class HttpTransportSettings { - public static final Setting SETTING_CORS_ENABLED = Setting.boolSetting("http.cors.enabled", false, false, Scope.CLUSTER); - public static final Setting SETTING_CORS_ALLOW_ORIGIN = new Setting("http.cors.allow-origin", "", (value) -> value, false, Scope.CLUSTER); - public static final Setting SETTING_CORS_MAX_AGE = Setting.intSetting("http.cors.max-age", 1728000, false, Scope.CLUSTER); - public static final Setting SETTING_CORS_ALLOW_METHODS = new Setting("http.cors.allow-methods", "OPTIONS, HEAD, GET, POST, PUT, DELETE", (value) -> value, false, Scope.CLUSTER); - public static final Setting SETTING_CORS_ALLOW_HEADERS = new Setting("http.cors.allow-headers", "X-Requested-With, Content-Type, Content-Length", (value) -> value, false, Scope.CLUSTER); - public static final Setting SETTING_CORS_ALLOW_CREDENTIALS = Setting.boolSetting("http.cors.allow-credentials", false, false, Scope.CLUSTER); - public static final Setting SETTING_PIPELINING = Setting.boolSetting("http.pipelining", true, false, Scope.CLUSTER); - public static final Setting SETTING_PIPELINING_MAX_EVENTS = Setting.intSetting("http.pipelining.max_events", 10000, false, Scope.CLUSTER); - public static final Setting SETTING_HTTP_COMPRESSION = Setting.boolSetting("http.compression", false, false, Scope.CLUSTER); - public static final Setting SETTING_HTTP_COMPRESSION_LEVEL = Setting.intSetting("http.compression_level", 6, false, Scope.CLUSTER); - public static final Setting> SETTING_HTTP_HOST = listSetting("http.host", emptyList(), s -> s, false, Scope.CLUSTER); - public static final Setting> SETTING_HTTP_PUBLISH_HOST = listSetting("http.publish_host", SETTING_HTTP_HOST, s -> s, false, Scope.CLUSTER); - public static final Setting> SETTING_HTTP_BIND_HOST = listSetting("http.bind_host", SETTING_HTTP_HOST, s -> s, false, Scope.CLUSTER); + public static final Setting SETTING_CORS_ENABLED = + Setting.boolSetting("http.cors.enabled", false, Property.NodeScope); + public static final Setting SETTING_CORS_ALLOW_ORIGIN = + new Setting("http.cors.allow-origin", "", (value) -> value, Property.NodeScope); + public static final Setting SETTING_CORS_MAX_AGE = + Setting.intSetting("http.cors.max-age", 1728000, Property.NodeScope); + public static final Setting SETTING_CORS_ALLOW_METHODS = + new Setting("http.cors.allow-methods", "OPTIONS, HEAD, GET, POST, PUT, DELETE", (value) -> value, Property.NodeScope); + public static final Setting SETTING_CORS_ALLOW_HEADERS = + new Setting("http.cors.allow-headers", "X-Requested-With, Content-Type, Content-Length", (value) -> value, Property.NodeScope); + public static final Setting SETTING_CORS_ALLOW_CREDENTIALS = + Setting.boolSetting("http.cors.allow-credentials", false, Property.NodeScope); + public static final Setting SETTING_PIPELINING = + Setting.boolSetting("http.pipelining", true, Property.NodeScope); + public static final Setting SETTING_PIPELINING_MAX_EVENTS = + Setting.intSetting("http.pipelining.max_events", 10000, Property.NodeScope); + public static final 
Setting SETTING_HTTP_COMPRESSION = + Setting.boolSetting("http.compression", false, Property.NodeScope); + public static final Setting SETTING_HTTP_COMPRESSION_LEVEL = + Setting.intSetting("http.compression_level", 6, Property.NodeScope); + public static final Setting> SETTING_HTTP_HOST = + listSetting("http.host", emptyList(), Function.identity(), Property.NodeScope); + public static final Setting> SETTING_HTTP_PUBLISH_HOST = + listSetting("http.publish_host", SETTING_HTTP_HOST, Function.identity(), Property.NodeScope); + public static final Setting> SETTING_HTTP_BIND_HOST = + listSetting("http.bind_host", SETTING_HTTP_HOST, Function.identity(), Property.NodeScope); - public static final Setting SETTING_HTTP_PORT = new Setting("http.port", "9200-9300", PortsRange::new, false, Scope.CLUSTER); - public static final Setting SETTING_HTTP_PUBLISH_PORT = Setting.intSetting("http.publish_port", -1, -1, false, Scope.CLUSTER); - public static final Setting SETTING_HTTP_DETAILED_ERRORS_ENABLED = Setting.boolSetting("http.detailed_errors.enabled", true, false, Scope.CLUSTER); - public static final Setting SETTING_HTTP_MAX_CONTENT_LENGTH = Setting.byteSizeSetting("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB), false, Scope.CLUSTER) ; - public static final Setting SETTING_HTTP_MAX_CHUNK_SIZE = Setting.byteSizeSetting("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB), false, Scope.CLUSTER) ; - public static final Setting SETTING_HTTP_MAX_HEADER_SIZE = Setting.byteSizeSetting("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB), false, Scope.CLUSTER) ; - public static final Setting SETTING_HTTP_MAX_INITIAL_LINE_LENGTH = Setting.byteSizeSetting("http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB), false, Scope.CLUSTER) ; + public static final Setting SETTING_HTTP_PORT = + new Setting("http.port", "9200-9300", PortsRange::new, Property.NodeScope); + public static final Setting SETTING_HTTP_PUBLISH_PORT = + Setting.intSetting("http.publish_port", -1, -1, Property.NodeScope); + public static final Setting SETTING_HTTP_DETAILED_ERRORS_ENABLED = + Setting.boolSetting("http.detailed_errors.enabled", true, Property.NodeScope); + public static final Setting SETTING_HTTP_MAX_CONTENT_LENGTH = + Setting.byteSizeSetting("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB), Property.NodeScope); + public static final Setting SETTING_HTTP_MAX_CHUNK_SIZE = + Setting.byteSizeSetting("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB), Property.NodeScope); + public static final Setting SETTING_HTTP_MAX_HEADER_SIZE = + Setting.byteSizeSetting("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB), Property.NodeScope); + public static final Setting SETTING_HTTP_MAX_INITIAL_LINE_LENGTH = + Setting.byteSizeSetting("http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB), Property.NodeScope); // don't reset cookies by default, since I don't think we really need to // note, parsing cookies was fixed in netty 3.5.1 regarding stack allocation, but still, currently, we don't need cookies - public static final Setting SETTING_HTTP_RESET_COOKIES = Setting.boolSetting("http.reset_cookies", false, false, Scope.CLUSTER); + public static final Setting SETTING_HTTP_RESET_COOKIES = + Setting.boolSetting("http.reset_cookies", false, Property.NodeScope); private HttpTransportSettings() { } diff --git a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java 
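The block above is part of a mechanical migration of the Setting API: the old (key, default, dynamic flag, Scope) constructors become (key, default, Property...), so one setting can declare several orthogonal properties. A minimal analogue for illustration only, not the real org.elasticsearch.common.settings.Setting:

    // Minimal analogue (not the real Setting class) of the Property-based API above.
    import java.util.EnumSet;
    import java.util.Set;
    import java.util.function.Function;

    final class SettingSketch<T> {
        enum Property { Dynamic, NodeScope, IndexScope }

        final String key;
        final String defaultValue;
        final Function<String, T> parser;
        final Set<Property> properties;

        SettingSketch(String key, String defaultValue, Function<String, T> parser, Property... props) {
            this.key = key;
            this.defaultValue = defaultValue;
            this.parser = parser;
            Set<Property> set = EnumSet.noneOf(Property.class);
            for (Property p : props) {
                set.add(p);
            }
            this.properties = set;
        }

        boolean isDynamic() { return properties.contains(Property.Dynamic); }

        // e.g. new SettingSketch<>("http.cors.max-age", "1728000", Integer::valueOf, Property.NodeScope)
    }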
b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java index e64c6401f71..332380d9fb1 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java +++ b/core/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.netty.OpenChannelsHandler; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.InetSocketTransportAddress; @@ -118,33 +119,32 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent SETTING_HTTP_NETTY_MAX_CUMULATION_BUFFER_CAPACITY = - Setting.byteSizeSetting("http.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER); + Setting.byteSizeSetting("http.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1), + Property.NodeScope); public static Setting SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = - Setting.intSetting("http.netty.max_composite_buffer_components", -1, false, Setting.Scope.CLUSTER); + Setting.intSetting("http.netty.max_composite_buffer_components", -1, Property.NodeScope); public static final Setting SETTING_HTTP_WORKER_COUNT = new Setting<>("http.netty.worker_count", (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2), - (s) -> Setting.parseInt(s, 1, "http.netty.worker_count"), false, Setting.Scope.CLUSTER); + (s) -> Setting.parseInt(s, 1, "http.netty.worker_count"), Property.NodeScope); - public static final Setting SETTING_HTTP_TCP_NO_DELAY = boolSetting("http.tcp_no_delay", NetworkService.TcpSettings - .TCP_NO_DELAY, false, - Setting.Scope.CLUSTER); - public static final Setting SETTING_HTTP_TCP_KEEP_ALIVE = boolSetting("http.tcp.keep_alive", NetworkService.TcpSettings - .TCP_KEEP_ALIVE, false, - Setting.Scope.CLUSTER); - public static final Setting SETTING_HTTP_TCP_BLOCKING_SERVER = boolSetting("http.tcp.blocking_server", NetworkService - .TcpSettings.TCP_BLOCKING_SERVER, - false, Setting.Scope.CLUSTER); - public static final Setting SETTING_HTTP_TCP_REUSE_ADDRESS = boolSetting("http.tcp.reuse_address", NetworkService - .TcpSettings.TCP_REUSE_ADDRESS, - false, Setting.Scope.CLUSTER); + public static final Setting SETTING_HTTP_TCP_NO_DELAY = + boolSetting("http.tcp_no_delay", NetworkService.TcpSettings.TCP_NO_DELAY, Property.NodeScope); + public static final Setting SETTING_HTTP_TCP_KEEP_ALIVE = + boolSetting("http.tcp.keep_alive", NetworkService.TcpSettings.TCP_KEEP_ALIVE, Property.NodeScope); + public static final Setting SETTING_HTTP_TCP_BLOCKING_SERVER = + boolSetting("http.tcp.blocking_server", NetworkService.TcpSettings.TCP_BLOCKING_SERVER, Property.NodeScope); + public static final Setting SETTING_HTTP_TCP_REUSE_ADDRESS = + boolSetting("http.tcp.reuse_address", NetworkService.TcpSettings.TCP_REUSE_ADDRESS, Property.NodeScope); - public static final Setting SETTING_HTTP_TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("http.tcp.send_buffer_size", - NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE, false, Setting.Scope.CLUSTER); - public static final Setting SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("http.tcp" + - ".receive_buffer_size", NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE, false, 
Setting.Scope.CLUSTER); - public static final Setting SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting( - "transport.netty.receive_predictor_size", + public static final Setting SETTING_HTTP_TCP_SEND_BUFFER_SIZE = + Setting.byteSizeSetting("http.tcp.send_buffer_size", NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE, + Property.NodeScope); + public static final Setting SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE = + Setting.byteSizeSetting("http.tcp.receive_buffer_size", NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE, + Property.NodeScope); + public static final Setting SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE = + Setting.byteSizeSetting("transport.netty.receive_predictor_size", settings -> { long defaultReceiverPredictor = 512 * 1024; if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) { @@ -154,13 +154,11 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN = byteSizeSetting("http.netty" + - ".receive_predictor_min", - SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER); - public static final Setting SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX = byteSizeSetting("http.netty" + - ".receive_predictor_max", - SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER); + }, Property.NodeScope); + public static final Setting SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN = + byteSizeSetting("http.netty.receive_predictor_min", SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope); + public static final Setting SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX = + byteSizeSetting("http.netty.receive_predictor_max", SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope); protected final NetworkService networkService; @@ -262,7 +260,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent Integer.MAX_VALUE) { - logger.warn("maxContentLength[" + maxContentLength + "] set to high value, resetting it to [100mb]"); + logger.warn("maxContentLength[{}] set to high value, resetting it to [100mb]", maxContentLength); maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB); } this.maxContentLength = maxContentLength; diff --git a/core/src/main/java/org/elasticsearch/index/Index.java b/core/src/main/java/org/elasticsearch/index/Index.java index 80bf3c31b44..3ffe13e38b1 100644 --- a/core/src/main/java/org/elasticsearch/index/Index.java +++ b/core/src/main/java/org/elasticsearch/index/Index.java @@ -19,6 +19,7 @@ package org.elasticsearch.index; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -30,7 +31,7 @@ import java.io.IOException; */ public class Index implements Writeable { - private final static Index PROTO = new Index("", ""); + public static final Index[] EMPTY_ARRAY = new Index[0]; private final String name; private final String uuid; @@ -40,6 +41,12 @@ public class Index implements Writeable { this.uuid = uuid.intern(); } + public Index(StreamInput in) throws IOException { + this.name = in.readString(); + this.uuid = in.readString(); + } + + public String getName() { return this.name; } @@ -50,7 +57,14 @@ public class Index implements Writeable { @Override public String toString() { - return "[" + name + "]"; + /* + * If we have a uuid we put it in the toString so it'll show up in logs which is useful as more and more things use the uuid rather + * than the name as the lookup key for the 
index. + */ + if (ClusterState.UNKNOWN_UUID.equals(uuid)) { + return "[" + name + "]"; + } + return "[" + name + "/" + uuid + "]"; } @Override @@ -72,13 +86,9 @@ public class Index implements Writeable { return result; } - public static Index readIndex(StreamInput in) throws IOException { - return PROTO.readFrom(in); - } - @Override public Index readFrom(StreamInput in) throws IOException { - return new Index(in.readString(), in.readString()); + return new Index(in); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/IndexModule.java b/core/src/main/java/org/elasticsearch/index/IndexModule.java index eabc0951e7f..48230e6ec1e 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/core/src/main/java/org/elasticsearch/index/IndexModule.java @@ -22,6 +22,7 @@ package org.elasticsearch.index; import org.apache.lucene.util.SetOnce; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.analysis.AnalysisRegistry; @@ -65,13 +66,17 @@ import java.util.function.Function; */ public final class IndexModule { - public static final Setting INDEX_STORE_TYPE_SETTING = new Setting<>("index.store.type", "", Function.identity(), false, Setting.Scope.INDEX); + public static final Setting INDEX_STORE_TYPE_SETTING = + new Setting<>("index.store.type", "", Function.identity(), Property.IndexScope, Property.NodeScope); public static final String SIMILARITY_SETTINGS_PREFIX = "index.similarity"; public static final String INDEX_QUERY_CACHE = "index"; public static final String NONE_QUERY_CACHE = "none"; - public static final Setting INDEX_QUERY_CACHE_TYPE_SETTING = new Setting<>("index.queries.cache.type", INDEX_QUERY_CACHE, Function.identity(), false, Setting.Scope.INDEX); + public static final Setting INDEX_QUERY_CACHE_TYPE_SETTING = + new Setting<>("index.queries.cache.type", INDEX_QUERY_CACHE, Function.identity(), Property.IndexScope); + // for test purposes only - public static final Setting INDEX_QUERY_CACHE_EVERYTHING_SETTING = Setting.boolSetting("index.queries.cache.everything", false, false, Setting.Scope.INDEX); + public static final Setting INDEX_QUERY_CACHE_EVERYTHING_SETTING = + Setting.boolSetting("index.queries.cache.everything", false, Property.IndexScope); private final IndexSettings indexSettings; private final IndexStoreConfig indexStoreConfig; private final AnalysisRegistry analysisRegistry; @@ -83,7 +88,7 @@ public final class IndexModule { private final Map> similarities = new HashMap<>(); private final Map> storeTypes = new HashMap<>(); private final Map> queryCaches = new HashMap<>(); - + private final SetOnce forceQueryCacheType = new SetOnce<>(); public IndexModule(IndexSettings indexSettings, IndexStoreConfig indexStoreConfig, AnalysisRegistry analysisRegistry) { this.indexStoreConfig = indexStoreConfig; @@ -261,11 +266,23 @@ public final class IndexModule { } indexSettings.getScopedSettings().addSettingsUpdateConsumer(IndexStore.INDEX_STORE_THROTTLE_TYPE_SETTING, store::setType); indexSettings.getScopedSettings().addSettingsUpdateConsumer(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, store::setMaxRate); - final String queryCacheType = indexSettings.getValue(INDEX_QUERY_CACHE_TYPE_SETTING); + final String queryCacheType = forceQueryCacheType.get() != null ? 
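The toString() change above prints the uuid only when one has been assigned, so log lines stay readable for indices that still carry the unknown-uuid placeholder. A standalone sketch; UNKNOWN_UUID here is a hypothetical stand-in for ClusterState.UNKNOWN_UUID:

    // Standalone sketch of the convention above; UNKNOWN_UUID is a hypothetical
    // stand-in for the ClusterState.UNKNOWN_UUID placeholder.
    final class IndexToStringSketch {
        static final String UNKNOWN_UUID = "_na_";

        static String describe(String name, String uuid) {
            if (UNKNOWN_UUID.equals(uuid)) {
                return "[" + name + "]";              // uuid not assigned yet: name only
            }
            return "[" + name + "/" + uuid + "]";     // uuid known: include it for log correlation
        }
    }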
forceQueryCacheType.get() : indexSettings.getValue(INDEX_QUERY_CACHE_TYPE_SETTING); final BiFunction queryCacheProvider = queryCaches.get(queryCacheType); final QueryCache queryCache = queryCacheProvider.apply(indexSettings, indicesQueryCache); return new IndexService(indexSettings, environment, new SimilarityService(indexSettings, similarities), shardStoreDeleter, analysisRegistry, engineFactory.get(), servicesProvider, queryCache, store, eventListener, searcherWrapperFactory, mapperRegistry, indicesFieldDataCache, listeners); } + /** + * Forces a certain query cache type. If this is set, + * the given cache type overrides the default as well as the type + * set on the index level. + * NOTE: this can only be set once + * + * @see #INDEX_QUERY_CACHE_TYPE_SETTING + */ + public void forceQueryCacheType(String type) { + this.forceQueryCacheType.set(type); + } + } diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java index 6b4a1851ab5..815f257a45d 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexService.java +++ b/core/src/main/java/org/elasticsearch/index/IndexService.java @@ -19,18 +19,6 @@ package org.elasticsearch.index; -import java.io.Closeable; -import java.io.IOException; -import java.nio.file.Path; -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Query; @@ -44,6 +32,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; @@ -57,10 +46,10 @@ import org.elasticsearch.index.cache.query.QueryCache; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineClosedException; import org.elasticsearch.index.engine.EngineFactory; -import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.percolator.PercolatorQueryCache; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexEventListener; @@ -81,6 +70,18 @@ import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.threadpool.ThreadPool; +import java.io.Closeable; +import java.io.IOException; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; import static
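forceQueryCacheType relies on the SetOnce field declared above: the forced cache type can be assigned exactly once and, once set, wins over the index-level setting. A minimal SetOnce-like holder illustrating those semantics (the real code uses Lucene's org.apache.lucene.util.SetOnce, whose failure exception differs):

    // Minimal SetOnce-like holder illustrating the semantics above.
    import java.util.concurrent.atomic.AtomicReference;

    final class SetOnceSketch<T> {
        private final AtomicReference<T> ref = new AtomicReference<>();

        void set(T value) {
            if (ref.compareAndSet(null, value) == false) {
                throw new IllegalStateException("already set");
            }
        }

        T orElse(T fallback) {
            T value = ref.get();
            return value != null ? value : fallback;  // the forced value wins over the fallback
        }
    }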
org.elasticsearch.common.collect.MapBuilder.newMapBuilder; @@ -113,6 +114,8 @@ public final class IndexService extends AbstractIndexComponent implements IndexC private volatile AsyncRefreshTask refreshTask; private volatile AsyncTranslogFSync fsyncTask; private final SearchSlowLog searchSlowLog; + private final ThreadPool threadPool; + private final BigArrays bigArrays; public IndexService(IndexSettings indexSettings, NodeEnvironment nodeEnv, SimilarityService similarityService, @@ -131,17 +134,24 @@ public final class IndexService extends AbstractIndexComponent implements IndexC this.indexSettings = indexSettings; this.analysisService = registry.build(indexSettings); this.similarityService = similarityService; - this.mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry, IndexService.this::newQueryShardContext); - this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache, nodeServicesProvider.getCircuitBreakerService(), mapperService); + this.mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry, + IndexService.this::newQueryShardContext); + this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache, + nodeServicesProvider.getCircuitBreakerService(), mapperService); this.shardStoreDeleter = shardStoreDeleter; + this.bigArrays = nodeServicesProvider.getBigArrays(); + this.threadPool = nodeServicesProvider.getThreadPool(); this.eventListener = eventListener; this.nodeEnv = nodeEnv; this.nodeServicesProvider = nodeServicesProvider; this.indexStore = indexStore; indexFieldData.setListener(new FieldDataCacheListener(this)); this.bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetCacheListener(this)); - this.warmer = new IndexWarmer(indexSettings.getSettings(), nodeServicesProvider.getThreadPool(), bitsetFilterCache.createListener(nodeServicesProvider.getThreadPool())); - this.indexCache = new IndexCache(indexSettings, queryCache, bitsetFilterCache); + PercolatorQueryCache percolatorQueryCache = new PercolatorQueryCache(indexSettings, IndexService.this::newQueryShardContext); + this.warmer = new IndexWarmer(indexSettings.getSettings(), threadPool, + bitsetFilterCache.createListener(threadPool), + percolatorQueryCache.createListener(threadPool)); + this.indexCache = new IndexCache(indexSettings, queryCache, bitsetFilterCache, percolatorQueryCache); this.engineFactory = engineFactory; // initialize this last -- otherwise if the wrapper requires any other member to be non-null we fail with an NPE this.searcherWrapper = wrapperFactory.newWrapper(this); @@ -230,7 +240,8 @@ public final class IndexService extends AbstractIndexComponent implements IndexC } } } finally { - IOUtils.close(bitsetFilterCache, indexCache, mapperService, indexFieldData, analysisService, refreshTask, fsyncTask); + IOUtils.close(bitsetFilterCache, indexCache, mapperService, indexFieldData, analysisService, refreshTask, fsyncTask, + cache().getPercolatorQueryCache()); } } } @@ -300,7 +311,9 @@ public final class IndexService extends AbstractIndexComponent implements IndexC } dataPathToShardCount.put(dataPath, curCount + 1); } - path = ShardPath.selectNewPathForShard(nodeEnv, shardId, this.indexSettings, routing.getExpectedShardSize() == ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE ? 
getAvgShardSizeInBytes() : routing.getExpectedShardSize(), + path = ShardPath.selectNewPathForShard(nodeEnv, shardId, this.indexSettings, + routing.getExpectedShardSize() == ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE + ? getAvgShardSizeInBytes() : routing.getExpectedShardSize(), dataPathToShardCount); logger.debug("{} creating using a new path [{}]", shardId, path); } else { @@ -315,17 +328,22 @@ public final class IndexService extends AbstractIndexComponent implements IndexC // if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary. final boolean canDeleteShardContent = IndexMetaData.isOnSharedFilesystem(indexSettings) == false || (primary && IndexMetaData.isOnSharedFilesystem(indexSettings)); - final Engine.Warmer engineWarmer = (searcher, toLevel) -> { + final Engine.Warmer engineWarmer = (searcher) -> { IndexShard shard = getShardOrNull(shardId.getId()); if (shard != null) { - warmer.warm(searcher, shard, IndexService.this.indexSettings, toLevel); + warmer.warm(searcher, shard, IndexService.this.indexSettings); } }; - store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, canDeleteShardContent, () -> eventListener.onStoreClosed(shardId))); + store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, + new StoreCloseListener(shardId, canDeleteShardContent, () -> eventListener.onStoreClosed(shardId))); if (useShadowEngine(primary, indexSettings)) { - indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider, searchSlowLog, engineWarmer); // no indexing listeners - shadow engines don't index + indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, + indexFieldData, engineFactory, eventListener, searcherWrapper, threadPool, bigArrays, searchSlowLog, engineWarmer); + // no indexing listeners - shadow engines don't index } else { - indexShard = new IndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider, searchSlowLog, engineWarmer, listeners); + indexShard = new IndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, + indexFieldData, engineFactory, eventListener, searcherWrapper, threadPool, bigArrays, searchSlowLog, engineWarmer, + listeners); } eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.afterIndexShardCreated(indexShard); @@ -370,7 +388,8 @@ public final class IndexService extends AbstractIndexComponent implements IndexC // and close the shard so no operations are allowed to it if (indexShard != null) { try { - final boolean flushEngine = deleted.get() == false && closed.get(); // only flush we are we closed (closed index or shutdown) and if we are not deleted + // only flush if we are closed (closed index or shutdown) and if we are not deleted + final boolean flushEngine = deleted.get() == false && closed.get(); indexShard.close(reason, flushEngine); } catch (Throwable e) { logger.debug("[{}] failed to close index shard", e, shardId); @@ -417,14 +436,23 @@ public final class IndexService extends AbstractIndexComponent implements IndexC } /** - * Creates a new QueryShardContext.
The context has not types set yet, if types are required set them via {@link QueryShardContext#setTypes(String...)} + * Creates a new QueryShardContext. The context has no types set yet; if types are required, set them via + * {@link QueryShardContext#setTypes(String...)} */ public QueryShardContext newQueryShardContext() { - return new QueryShardContext(indexSettings, indexCache.bitsetFilterCache(), indexFieldData, mapperService(), similarityService(), nodeServicesProvider.getScriptService(), nodeServicesProvider.getIndicesQueriesRegistry()); + return new QueryShardContext( + indexSettings, indexCache.bitsetFilterCache(), indexFieldData, mapperService(), + similarityService(), nodeServicesProvider.getScriptService(), nodeServicesProvider.getIndicesQueriesRegistry(), + indexCache.getPercolatorQueryCache() + ); } - ThreadPool getThreadPool() { - return nodeServicesProvider.getThreadPool(); + public ThreadPool getThreadPool() { + return threadPool; + } + + public BigArrays getBigArrays() { + return bigArrays; } public SearchSlowLog getSearchSlowLog() { @@ -496,21 +524,21 @@ public final class IndexService extends AbstractIndexComponent implements IndexC } @Override - public void onCache(ShardId shardId, String fieldName, FieldDataType fieldDataType, Accountable ramUsage) { + public void onCache(ShardId shardId, String fieldName, Accountable ramUsage) { if (shardId != null) { final IndexShard shard = indexService.getShardOrNull(shardId.id()); if (shard != null) { - shard.fieldData().onCache(shardId, fieldName, fieldDataType, ramUsage); + shard.fieldData().onCache(shardId, fieldName, ramUsage); } } } @Override - public void onRemoval(ShardId shardId, String fieldName, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes) { + public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) { if (shardId != null) { final IndexShard shard = indexService.getShardOrNull(shardId.id()); if (shard != null) { - shard.fieldData().onRemoval(shardId, fieldName, fieldDataType, wasEvicted, sizeInBytes); + shard.fieldData().onRemoval(shardId, fieldName, wasEvicted, sizeInBytes); } } } @@ -541,7 +569,8 @@ public final class IndexService extends AbstractIndexComponent implements IndexC AliasMetaData alias = aliases.get(aliasName); if (alias == null) { // This shouldn't happen unless alias disappeared after filteringAliases was called.
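A minimal usage sketch for the reworked newQueryShardContext() above; the indexService variable and the type name here are hypothetical, and, per the javadoc, types must be set explicitly before parsing type-dependent queries:

    QueryShardContext context = indexService.newQueryShardContext();
    // the context starts without types; set them when the query requires them
    context.setTypes("tweet");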
- throw new InvalidAliasNameException(indexSettings.getIndex(), aliasNames[0], "Unknown alias name was passed to alias Filter"); + throw new InvalidAliasNameException(indexSettings.getIndex(), aliasNames[0], + "Unknown alias name was passed to alias Filter"); } Query parsedFilter = parse(alias, context); if (parsedFilter != null) { @@ -592,6 +621,11 @@ public final class IndexService extends AbstractIndexComponent implements IndexC rescheduleFsyncTask(durability); } } + + // update primary terms + for (final IndexShard shard : this.shards.values()) { + shard.updatePrimaryTerm(metadata.primaryTerm(shard.shardId().id())); + } } private void rescheduleFsyncTask(Translog.Durability durability) { @@ -717,7 +751,8 @@ public final class IndexService extends AbstractIndexComponent implements IndexC } catch (Exception ex) { if (lastThrownException == null || sameException(lastThrownException, ex) == false) { // prevent the annoying fact of logging the same stuff all the time with an interval of 1 sec will spam all your logs - indexService.logger.warn("failed to run task {} - suppressing re-occurring exceptions unless the exception changes", ex, toString()); + indexService.logger.warn("failed to run task {} - suppressing re-occurring exceptions unless the exception changes", + ex, toString()); lastThrownException = ex; } } finally { @@ -750,7 +785,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC } @Override - public void close() { + public synchronized void close() { if (closed.compareAndSet(false, true)) { FutureUtils.cancel(scheduledFuture); scheduledFuture = null; diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java index b17b8ab7edf..b996e70b1e5 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -36,7 +37,6 @@ import org.elasticsearch.index.translog.Translog; import java.util.Locale; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Predicate; @@ -50,15 +50,26 @@ import java.util.function.Predicate; */ public final class IndexSettings { - public static final Setting DEFAULT_FIELD_SETTING = new Setting<>("index.query.default_field", AllFieldMapper.NAME, Function.identity(), false, Setting.Scope.INDEX); - public static final Setting QUERY_STRING_LENIENT_SETTING = Setting.boolSetting("index.query_string.lenient", false, false, Setting.Scope.INDEX); - public static final Setting QUERY_STRING_ANALYZE_WILDCARD = Setting.boolSetting("indices.query.query_string.analyze_wildcard", false, false, Setting.Scope.CLUSTER); - public static final Setting QUERY_STRING_ALLOW_LEADING_WILDCARD = Setting.boolSetting("indices.query.query_string.allowLeadingWildcard", true, false, Setting.Scope.CLUSTER); - public static final Setting ALLOW_UNMAPPED = Setting.boolSetting("index.query.parse.allow_unmapped_fields", true, false, 
Setting.Scope.INDEX); - public static final Setting INDEX_TRANSLOG_SYNC_INTERVAL_SETTING = Setting.timeSetting("index.translog.sync_interval", TimeValue.timeValueSeconds(5), TimeValue.timeValueMillis(100), false, Setting.Scope.INDEX); - public static final Setting INDEX_TRANSLOG_DURABILITY_SETTING = new Setting<>("index.translog.durability", Translog.Durability.REQUEST.name(), (value) -> Translog.Durability.valueOf(value.toUpperCase(Locale.ROOT)), true, Setting.Scope.INDEX); - public static final Setting INDEX_WARMER_ENABLED_SETTING = Setting.boolSetting("index.warmer.enabled", true, true, Setting.Scope.INDEX); - public static final Setting INDEX_TTL_DISABLE_PURGE_SETTING = Setting.boolSetting("index.ttl.disable_purge", false, true, Setting.Scope.INDEX); + public static final Setting DEFAULT_FIELD_SETTING = + new Setting<>("index.query.default_field", AllFieldMapper.NAME, Function.identity(), Property.IndexScope); + public static final Setting QUERY_STRING_LENIENT_SETTING = + Setting.boolSetting("index.query_string.lenient", false, Property.IndexScope); + public static final Setting QUERY_STRING_ANALYZE_WILDCARD = + Setting.boolSetting("indices.query.query_string.analyze_wildcard", false, Property.NodeScope); + public static final Setting QUERY_STRING_ALLOW_LEADING_WILDCARD = + Setting.boolSetting("indices.query.query_string.allowLeadingWildcard", true, Property.NodeScope); + public static final Setting ALLOW_UNMAPPED = + Setting.boolSetting("index.query.parse.allow_unmapped_fields", true, Property.IndexScope); + public static final Setting INDEX_TRANSLOG_SYNC_INTERVAL_SETTING = + Setting.timeSetting("index.translog.sync_interval", TimeValue.timeValueSeconds(5), TimeValue.timeValueMillis(100), + Property.IndexScope); + public static final Setting INDEX_TRANSLOG_DURABILITY_SETTING = + new Setting<>("index.translog.durability", Translog.Durability.REQUEST.name(), + (value) -> Translog.Durability.valueOf(value.toUpperCase(Locale.ROOT)), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_WARMER_ENABLED_SETTING = + Setting.boolSetting("index.warmer.enabled", true, Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_TTL_DISABLE_PURGE_SETTING = + Setting.boolSetting("index.ttl.disable_purge", false, Property.Dynamic, Property.IndexScope); public static final Setting INDEX_CHECK_ON_STARTUP = new Setting<>("index.shard.check_on_startup", "false", (s) -> { switch(s) { case "false": @@ -69,7 +80,7 @@ public final class IndexSettings { default: throw new IllegalArgumentException("unknown value for [index.shard.check_on_startup] must be one of [true, false, fix, checksum] but was: " + s); } - }, false, Setting.Scope.INDEX); + }, Property.IndexScope); /** * Index setting describing the maximum value of from + size on a query. @@ -79,10 +90,15 @@ public final class IndexSettings { * safely. 1,000,000 is probably way to high for any cluster to set * safely. 
*/ - public static final Setting MAX_RESULT_WINDOW_SETTING = Setting.intSetting("index.max_result_window", 10000, 1, true, Setting.Scope.INDEX); + public static final Setting MAX_RESULT_WINDOW_SETTING = + Setting.intSetting("index.max_result_window", 10000, 1, Property.Dynamic, Property.IndexScope); public static final TimeValue DEFAULT_REFRESH_INTERVAL = new TimeValue(1, TimeUnit.SECONDS); - public static final Setting INDEX_REFRESH_INTERVAL_SETTING = Setting.timeSetting("index.refresh_interval", DEFAULT_REFRESH_INTERVAL, new TimeValue(-1, TimeUnit.MILLISECONDS), true, Setting.Scope.INDEX); - public static final Setting INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING = Setting.byteSizeSetting("index.translog.flush_threshold_size", new ByteSizeValue(512, ByteSizeUnit.MB), true, Setting.Scope.INDEX); + public static final Setting INDEX_REFRESH_INTERVAL_SETTING = + Setting.timeSetting("index.refresh_interval", DEFAULT_REFRESH_INTERVAL, new TimeValue(-1, TimeUnit.MILLISECONDS), + Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING = + Setting.byteSizeSetting("index.translog.flush_threshold_size", new ByteSizeValue(512, ByteSizeUnit.MB), Property.Dynamic, + Property.IndexScope); /** @@ -90,7 +106,9 @@ public final class IndexSettings { * This setting is realtime updateable */ public static final TimeValue DEFAULT_GC_DELETES = TimeValue.timeValueSeconds(60); - public static final Setting INDEX_GC_DELETES_SETTING = Setting.timeSetting("index.gc_deletes", DEFAULT_GC_DELETES, new TimeValue(-1, TimeUnit.MILLISECONDS), true, Setting.Scope.INDEX); + public static final Setting INDEX_GC_DELETES_SETTING = + Setting.timeSetting("index.gc_deletes", DEFAULT_GC_DELETES, new TimeValue(-1, TimeUnit.MILLISECONDS), Property.Dynamic, + Property.IndexScope); private final Index index; private final Version version; diff --git a/core/src/main/java/org/elasticsearch/index/IndexWarmer.java b/core/src/main/java/org/elasticsearch/index/IndexWarmer.java index 9fabc8efc40..499031af970 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexWarmer.java +++ b/core/src/main/java/org/elasticsearch/index/IndexWarmer.java @@ -19,18 +19,11 @@ package org.elasticsearch.index; -import com.carrotsearch.hppc.ObjectHashSet; -import com.carrotsearch.hppc.ObjectSet; -import com.carrotsearch.hppc.cursors.ObjectCursor; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.DirectoryReader; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.DocumentMapper; @@ -54,16 +47,12 @@ import java.util.concurrent.TimeUnit; */ public final class IndexWarmer extends AbstractComponent { - public static final Setting INDEX_NORMS_LOADING_SETTING = new Setting<>("index.norms.loading", - MappedFieldType.Loading.LAZY.toString(), (s) -> MappedFieldType.Loading.parse(s, MappedFieldType.Loading.LAZY), - false, Setting.Scope.INDEX); private final List listeners; IndexWarmer(Settings settings, ThreadPool threadPool, Listener... 
listeners) { super(settings); ArrayList list = new ArrayList<>(); final Executor executor = threadPool.executor(ThreadPool.Names.WARMER); - list.add(new NormsWarmer(executor)); list.add(new FieldDataWarmer(executor)); for (Listener listener : listeners) { list.add(listener); @@ -71,7 +60,7 @@ public final class IndexWarmer extends AbstractComponent { this.listeners = Collections.unmodifiableList(list); } - void warm(Engine.Searcher searcher, IndexShard shard, IndexSettings settings, boolean isTopReader) { + void warm(Engine.Searcher searcher, IndexShard shard, IndexSettings settings) { if (shard.state() == IndexShardState.CLOSED) { return; } @@ -79,22 +68,14 @@ public final class IndexWarmer extends AbstractComponent { return; } if (logger.isTraceEnabled()) { - if (isTopReader) { - logger.trace("{} top warming [{}]", shard.shardId(), searcher.reader()); - } else { - logger.trace("{} warming [{}]", shard.shardId(), searcher.reader()); - } + logger.trace("{} top warming [{}]", shard.shardId(), searcher.reader()); } shard.warmerService().onPreWarm(); long time = System.nanoTime(); final List terminationHandles = new ArrayList<>(); // get a handle on pending tasks for (final Listener listener : listeners) { - if (isTopReader) { - terminationHandles.add(listener.warmTopReader(shard, searcher)); - } else { - terminationHandles.add(listener.warmNewReaders(shard, searcher)); - } + terminationHandles.add(listener.warmReader(shard, searcher)); } // wait for termination for (TerminationHandle terminationHandle : terminationHandles) { @@ -102,22 +83,14 @@ public final class IndexWarmer extends AbstractComponent { terminationHandle.awaitTermination(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); - if (isTopReader) { - logger.warn("top warming has been interrupted", e); - } else { - logger.warn("warming has been interrupted", e); - } + logger.warn("top warming has been interrupted", e); break; } } long took = System.nanoTime() - time; shard.warmerService().onPostWarm(took); if (shard.warmerService().logger().isTraceEnabled()) { - if (isTopReader) { - shard.warmerService().logger().trace("top warming took [{}]", new TimeValue(took, TimeUnit.NANOSECONDS)); - } else { - shard.warmerService().logger().trace("warming took [{}]", new TimeValue(took, TimeUnit.NANOSECONDS)); - } + shard.warmerService().logger().trace("top warming took [{}]", new TimeValue(took, TimeUnit.NANOSECONDS)); } } @@ -132,67 +105,7 @@ public final class IndexWarmer extends AbstractComponent { public interface Listener { /** Queue tasks to warm-up the given segments and return handles that allow to wait for termination of the * execution of those tasks. 
*/ - TerminationHandle warmNewReaders(IndexShard indexShard, Engine.Searcher searcher); - - TerminationHandle warmTopReader(IndexShard indexShard, Engine.Searcher searcher); - } - - private static class NormsWarmer implements IndexWarmer.Listener { - private final Executor executor; - public NormsWarmer(Executor executor) { - this.executor = executor; - } - @Override - public TerminationHandle warmNewReaders(final IndexShard indexShard, final Engine.Searcher searcher) { - final MappedFieldType.Loading defaultLoading = indexShard.indexSettings().getValue(INDEX_NORMS_LOADING_SETTING); - final MapperService mapperService = indexShard.mapperService(); - final ObjectSet warmUp = new ObjectHashSet<>(); - for (DocumentMapper docMapper : mapperService.docMappers(false)) { - for (FieldMapper fieldMapper : docMapper.mappers()) { - final String indexName = fieldMapper.fieldType().name(); - MappedFieldType.Loading normsLoading = fieldMapper.fieldType().normsLoading(); - if (normsLoading == null) { - normsLoading = defaultLoading; - } - if (fieldMapper.fieldType().indexOptions() != IndexOptions.NONE && !fieldMapper.fieldType().omitNorms() - && normsLoading == MappedFieldType.Loading.EAGER) { - warmUp.add(indexName); - } - } - } - - final CountDownLatch latch = new CountDownLatch(1); - // Norms loading may be I/O intensive but is not CPU intensive, so we execute it in a single task - executor.execute(() -> { - try { - for (ObjectCursor stringObjectCursor : warmUp) { - final String indexName = stringObjectCursor.value; - final long start = System.nanoTime(); - for (final LeafReaderContext ctx : searcher.reader().leaves()) { - final NumericDocValues values = ctx.reader().getNormValues(indexName); - if (values != null) { - values.get(0); - } - } - if (indexShard.warmerService().logger().isTraceEnabled()) { - indexShard.warmerService().logger().trace("warmed norms for [{}], took [{}]", indexName, - TimeValue.timeValueNanos(System.nanoTime() - start)); - } - } - } catch (Throwable t) { - indexShard.warmerService().logger().warn("failed to warm-up norms", t); - } finally { - latch.countDown(); - } - }); - - return () -> latch.await(); - } - - @Override - public TerminationHandle warmTopReader(IndexShard indexShard, final Engine.Searcher searcher) { - return TerminationHandle.NO_WAIT; - } + TerminationHandle warmReader(IndexShard indexShard, Engine.Searcher searcher); } private static class FieldDataWarmer implements IndexWarmer.Listener { @@ -203,67 +116,17 @@ public final class IndexWarmer extends AbstractComponent { } @Override - public TerminationHandle warmNewReaders(final IndexShard indexShard, final Engine.Searcher searcher) { - final MapperService mapperService = indexShard.mapperService(); - final Map warmUp = new HashMap<>(); - for (DocumentMapper docMapper : mapperService.docMappers(false)) { - for (FieldMapper fieldMapper : docMapper.mappers()) { - final FieldDataType fieldDataType = fieldMapper.fieldType().fieldDataType(); - final String indexName = fieldMapper.fieldType().name(); - if (fieldDataType == null) { - continue; - } - if (fieldDataType.getLoading() == MappedFieldType.Loading.LAZY) { - continue; - } - - if (warmUp.containsKey(indexName)) { - continue; - } - warmUp.put(indexName, fieldMapper.fieldType()); - } - } - final IndexFieldDataService indexFieldDataService = indexShard.indexFieldDataService(); - final CountDownLatch latch = new CountDownLatch(searcher.reader().leaves().size() * warmUp.size()); - for (final LeafReaderContext ctx : searcher.reader().leaves()) { - for (final 
MappedFieldType fieldType : warmUp.values()) { - executor.execute(() -> { - try { - final long start = System.nanoTime(); - indexFieldDataService.getForField(fieldType).load(ctx); - if (indexShard.warmerService().logger().isTraceEnabled()) { - indexShard.warmerService().logger().trace("warmed fielddata for [{}], took [{}]", fieldType.name(), - TimeValue.timeValueNanos(System.nanoTime() - start)); - } - } catch (Throwable t) { - indexShard.warmerService().logger().warn("failed to warm-up fielddata for [{}]", t, fieldType.name()); - } finally { - latch.countDown(); - } - }); - } - } - return () -> latch.await(); - } - - @Override - public TerminationHandle warmTopReader(final IndexShard indexShard, final Engine.Searcher searcher) { + public TerminationHandle warmReader(final IndexShard indexShard, final Engine.Searcher searcher) { final MapperService mapperService = indexShard.mapperService(); final Map warmUpGlobalOrdinals = new HashMap<>(); for (DocumentMapper docMapper : mapperService.docMappers(false)) { for (FieldMapper fieldMapper : docMapper.mappers()) { - final FieldDataType fieldDataType = fieldMapper.fieldType().fieldDataType(); - final String indexName = fieldMapper.fieldType().name(); - if (fieldDataType == null) { + final MappedFieldType fieldType = fieldMapper.fieldType(); + final String indexName = fieldType.name(); + if (fieldType.eagerGlobalOrdinals() == false) { continue; } - if (fieldDataType.getLoading() != MappedFieldType.Loading.EAGER_GLOBAL_ORDINALS) { - continue; - } - if (warmUpGlobalOrdinals.containsKey(indexName)) { - continue; - } - warmUpGlobalOrdinals.put(indexName, fieldMapper.fieldType()); + warmUpGlobalOrdinals.put(indexName, fieldType); } } final IndexFieldDataService indexFieldDataService = indexShard.indexFieldDataService(); @@ -273,7 +136,12 @@ public final class IndexWarmer extends AbstractComponent { try { final long start = System.nanoTime(); IndexFieldData.Global ifd = indexFieldDataService.getForField(fieldType); - ifd.loadGlobal(searcher.getDirectoryReader()); + DirectoryReader reader = searcher.getDirectoryReader(); + IndexFieldData global = ifd.loadGlobal(reader); + if (reader.leaves().isEmpty() == false) { + global.load(reader.leaves().get(0)); + } + if (indexShard.warmerService().logger().isTraceEnabled()) { indexShard.warmerService().logger().trace("warmed global ordinals for [{}], took [{}]", fieldType.name(), TimeValue.timeValueNanos(System.nanoTime() - start)); diff --git a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index 5452daa7f07..ff10179f026 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.engine.Engine; @@ -36,6 +37,7 @@ import java.util.concurrent.TimeUnit; /** */ public final class IndexingSlowLog implements IndexingOperationListener { + private final Index index; private boolean reformat; private long indexWarnThreshold; private long indexInfoThreshold; @@ -51,15 +53,25 @@ public final class IndexingSlowLog implements IndexingOperationListener { private 
SlowLogLevel level; private final ESLogger indexLogger; - private final ESLogger deleteLogger; private static final String INDEX_INDEXING_SLOWLOG_PREFIX = "index.indexing.slowlog"; - public static final Setting INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.warn", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.info", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.debug", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.trace", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING = Setting.boolSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".reformat", true, true, Setting.Scope.INDEX); - public static final Setting INDEX_INDEXING_SLOWLOG_LEVEL_SETTING = new Setting<>(INDEX_INDEXING_SLOWLOG_PREFIX +".level", SlowLogLevel.TRACE.name(), SlowLogLevel::parse, true, Setting.Scope.INDEX); + public static final Setting INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING = + Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.warn", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING = + Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.info", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING = + Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.debug", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING = + Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.trace", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING = + Setting.boolSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".reformat", true, Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_INDEXING_SLOWLOG_LEVEL_SETTING = + new Setting<>(INDEX_INDEXING_SLOWLOG_PREFIX +".level", SlowLogLevel.TRACE.name(), SlowLogLevel::parse, Property.Dynamic, + Property.IndexScope); /** * Reads how much of the source to log. The user can specify any value they * like and numbers are interpreted the maximum number of characters to log @@ -72,19 +84,11 @@ public final class IndexingSlowLog implements IndexingOperationListener { } catch (NumberFormatException e) { return Booleans.parseBoolean(value, true) ? 
Integer.MAX_VALUE : 0; } - }, true, Setting.Scope.INDEX); + }, Property.Dynamic, Property.IndexScope); IndexingSlowLog(IndexSettings indexSettings) { - this(indexSettings, Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index"), - Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".delete")); - } - - /** - * Build with the specified loggers. Only used to testing. - */ - IndexingSlowLog(IndexSettings indexSettings, ESLogger indexLogger, ESLogger deleteLogger) { - this.indexLogger = indexLogger; - this.deleteLogger = deleteLogger; + this.indexLogger = Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index", indexSettings.getSettings()); + this.index = indexSettings.getIndex(); indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, this::setReformat); this.reformat = indexSettings.getValue(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING); @@ -109,7 +113,6 @@ public final class IndexingSlowLog implements IndexingOperationListener { private void setLevel(SlowLogLevel level) { this.level = level; this.indexLogger.setLevel(level.name()); - this.deleteLogger.setLevel(level.name()); } private void setWarnThreshold(TimeValue warnThreshold) { @@ -141,13 +144,13 @@ public final class IndexingSlowLog implements IndexingOperationListener { private void postIndexing(ParsedDocument doc, long tookInNanos) { if (indexWarnThreshold >= 0 && tookInNanos > indexWarnThreshold) { - indexLogger.warn("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.warn("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); } else if (indexInfoThreshold >= 0 && tookInNanos > indexInfoThreshold) { - indexLogger.info("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.info("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); } else if (indexDebugThreshold >= 0 && tookInNanos > indexDebugThreshold) { - indexLogger.debug("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.debug("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); } else if (indexTraceThreshold >= 0 && tookInNanos > indexTraceThreshold) { - indexLogger.trace("{}", new SlowLogParsedDocumentPrinter(doc, tookInNanos, reformat, maxSourceCharsToLog)); + indexLogger.trace("{}", new SlowLogParsedDocumentPrinter(index, doc, tookInNanos, reformat, maxSourceCharsToLog)); } } @@ -156,9 +159,11 @@ public final class IndexingSlowLog implements IndexingOperationListener { private final long tookInNanos; private final boolean reformat; private final int maxSourceCharsToLog; + private final Index index; - SlowLogParsedDocumentPrinter(ParsedDocument doc, long tookInNanos, boolean reformat, int maxSourceCharsToLog) { + SlowLogParsedDocumentPrinter(Index index, ParsedDocument doc, long tookInNanos, boolean reformat, int maxSourceCharsToLog) { this.doc = doc; + this.index = index; this.tookInNanos = tookInNanos; this.reformat = reformat; this.maxSourceCharsToLog = maxSourceCharsToLog; @@ -167,6 +172,7 @@ public final class IndexingSlowLog implements IndexingOperationListener { @Override public String toString() { StringBuilder sb = new StringBuilder(); + sb.append(index).append(" "); sb.append("took[").append(TimeValue.timeValueNanos(tookInNanos)).append("], took_millis[").append(TimeUnit.NANOSECONDS.toMillis(tookInNanos)).append("], "); 
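To make the new index prefix concrete: a minimal sketch of driving the printer above from a test in the same package, assuming an Index and a ParsedDocument are already at hand; the timing and source-logging values are hypothetical:

    // 2,100ms expressed in nanoseconds; reformat the source and log at most 1000 characters of it
    SlowLogParsedDocumentPrinter printer = new SlowLogParsedDocumentPrinter(index, doc, 2_100_000_000L, true, 1000);
    // renders roughly as: <index> took[2.1s], took_millis[2100], type[...], id[...], ...
    indexLogger.warn("{}", printer);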
sb.append("type[").append(doc.type()).append("], "); sb.append("id[").append(doc.id()).append("], "); diff --git a/core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java b/core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java index fc9f30cf3fd..c8d82eae888 100644 --- a/core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java +++ b/core/src/main/java/org/elasticsearch/index/MergePolicyConfig.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.TieredMergePolicy; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -126,15 +127,31 @@ public final class MergePolicyConfig { public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB); public static final double DEFAULT_SEGMENTS_PER_TIER = 10.0d; public static final double DEFAULT_RECLAIM_DELETES_WEIGHT = 2.0d; - public static final Setting INDEX_COMPOUND_FORMAT_SETTING = new Setting<>("index.compound_format", Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO), MergePolicyConfig::parseNoCFSRatio, true, Setting.Scope.INDEX); + public static final Setting INDEX_COMPOUND_FORMAT_SETTING = + new Setting<>("index.compound_format", Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO), MergePolicyConfig::parseNoCFSRatio, + Property.Dynamic, Property.IndexScope); - public static final Setting INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING = Setting.doubleSetting("index.merge.policy.expunge_deletes_allowed", DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d, true, Setting.Scope.INDEX); - public static final Setting INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING = Setting.byteSizeSetting("index.merge.policy.floor_segment", DEFAULT_FLOOR_SEGMENT, true, Setting.Scope.INDEX); - public static final Setting INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING = Setting.intSetting("index.merge.policy.max_merge_at_once", DEFAULT_MAX_MERGE_AT_ONCE, 2, true, Setting.Scope.INDEX); - public static final Setting INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING = Setting.intSetting("index.merge.policy.max_merge_at_once_explicit", DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT, 2, true, Setting.Scope.INDEX); - public static final Setting INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting("index.merge.policy.max_merged_segment", DEFAULT_MAX_MERGED_SEGMENT, true, Setting.Scope.INDEX); - public static final Setting INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING = Setting.doubleSetting("index.merge.policy.segments_per_tier", DEFAULT_SEGMENTS_PER_TIER, 2.0d, true, Setting.Scope.INDEX); - public static final Setting INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING = Setting.doubleSetting("index.merge.policy.reclaim_deletes_weight", DEFAULT_RECLAIM_DELETES_WEIGHT, 0.0d, true, Setting.Scope.INDEX); + public static final Setting INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING = + Setting.doubleSetting("index.merge.policy.expunge_deletes_allowed", DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d, + Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING = + Setting.byteSizeSetting("index.merge.policy.floor_segment", DEFAULT_FLOOR_SEGMENT, + Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING = + 
Setting.intSetting("index.merge.policy.max_merge_at_once", DEFAULT_MAX_MERGE_AT_ONCE, 2, + Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING = + Setting.intSetting("index.merge.policy.max_merge_at_once_explicit", DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT, 2, + Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING = + Setting.byteSizeSetting("index.merge.policy.max_merged_segment", DEFAULT_MAX_MERGED_SEGMENT, + Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING = + Setting.doubleSetting("index.merge.policy.segments_per_tier", DEFAULT_SEGMENTS_PER_TIER, 2.0d, + Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING = + Setting.doubleSetting("index.merge.policy.reclaim_deletes_weight", DEFAULT_RECLAIM_DELETES_WEIGHT, 0.0d, + Property.Dynamic, Property.IndexScope); public static final String INDEX_MERGE_ENABLED = "index.merge.enabled"; // don't convert to Setting<> and register... we only set this in tests and register via a plugin diff --git a/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java b/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java index 0d212a4eb30..2eb43a50ee4 100644 --- a/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java +++ b/core/src/main/java/org/elasticsearch/index/MergeSchedulerConfig.java @@ -21,6 +21,7 @@ package org.elasticsearch.index; import org.apache.lucene.index.ConcurrentMergeScheduler; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.util.concurrent.EsExecutors; /** @@ -51,9 +52,17 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; */ public final class MergeSchedulerConfig { - public static final Setting MAX_THREAD_COUNT_SETTING = new Setting<>("index.merge.scheduler.max_thread_count", (s) -> Integer.toString(Math.max(1, Math.min(4, EsExecutors.boundedNumberOfProcessors(s) / 2))), (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_thread_count"), true, Setting.Scope.INDEX); - public static final Setting MAX_MERGE_COUNT_SETTING = new Setting<>("index.merge.scheduler.max_merge_count", (s) -> Integer.toString(MAX_THREAD_COUNT_SETTING.get(s) + 5), (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_merge_count"), true, Setting.Scope.INDEX); - public static final Setting AUTO_THROTTLE_SETTING = Setting.boolSetting("index.merge.scheduler.auto_throttle", true, true, Setting.Scope.INDEX); + public static final Setting MAX_THREAD_COUNT_SETTING = + new Setting<>("index.merge.scheduler.max_thread_count", + (s) -> Integer.toString(Math.max(1, Math.min(4, EsExecutors.boundedNumberOfProcessors(s) / 2))), + (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_thread_count"), Property.Dynamic, + Property.IndexScope); + public static final Setting MAX_MERGE_COUNT_SETTING = + new Setting<>("index.merge.scheduler.max_merge_count", + (s) -> Integer.toString(MAX_THREAD_COUNT_SETTING.get(s) + 5), + (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_merge_count"), Property.Dynamic, Property.IndexScope); + public static final Setting AUTO_THROTTLE_SETTING = + Setting.boolSetting("index.merge.scheduler.auto_throttle", true, Property.Dynamic, Property.IndexScope); private volatile boolean autoThrottle; private volatile int maxThreadCount; diff --git 
a/core/src/main/java/org/elasticsearch/index/SearchSlowLog.java b/core/src/main/java/org/elasticsearch/index/SearchSlowLog.java index df3139fe57c..cfa779d64aa 100644 --- a/core/src/main/java/org/elasticsearch/index/SearchSlowLog.java +++ b/core/src/main/java/org/elasticsearch/index/SearchSlowLog.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.search.internal.SearchContext; @@ -50,16 +51,35 @@ public final class SearchSlowLog { private final ESLogger fetchLogger; private static final String INDEX_SEARCH_SLOWLOG_PREFIX = "index.search.slowlog"; - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.warn", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.info", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.debug", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.trace", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.warn", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.info", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.debug", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.trace", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_REFORMAT = Setting.boolSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".reformat", true, true, Setting.Scope.INDEX); - public static final Setting INDEX_SEARCH_SLOWLOG_LEVEL = new Setting<>(INDEX_SEARCH_SLOWLOG_PREFIX + ".level", SlowLogLevel.TRACE.name(), SlowLogLevel::parse, true, Setting.Scope.INDEX); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.warn", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING = + 
Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.info", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.debug", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.trace", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.warn", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.info", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.debug", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING = + Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.trace", TimeValue.timeValueNanos(-1), + TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_REFORMAT = + Setting.boolSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".reformat", true, Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_SEARCH_SLOWLOG_LEVEL = + new Setting<>(INDEX_SEARCH_SLOWLOG_PREFIX + ".level", SlowLogLevel.TRACE.name(), SlowLogLevel::parse, Property.Dynamic, + Property.IndexScope); public SearchSlowLog(IndexSettings indexSettings) { diff --git a/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java b/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java index b7481e78496..1054721535e 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java @@ -67,8 +67,10 @@ import org.elasticsearch.env.Environment; import java.io.BufferedReader; import java.io.IOException; import java.io.Reader; +import java.nio.charset.CharacterCodingException; import java.nio.charset.StandardCharsets; import java.nio.file.Path; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -163,7 +165,8 @@ public class Analysis { NAMED_STOP_WORDS = unmodifiableMap(namedStopWords); } - public static CharArraySet parseWords(Environment env, Settings settings, String name, CharArraySet defaultWords, Map> namedWords, boolean ignoreCase) { + public static CharArraySet parseWords(Environment env, Settings settings, String name, CharArraySet defaultWords, + Map> namedWords, boolean ignoreCase) { String value = settings.get(name); if (value != null) { if ("_none_".equals(value)) { @@ -237,12 +240,17 @@ public class Analysis { } } - final Path wordListFile = env.configFile().resolve(wordListPath); + final Path path = 
env.configFile().resolve(wordListPath); - try (BufferedReader reader = FileSystemUtils.newBufferedReader(wordListFile.toUri().toURL(), StandardCharsets.UTF_8)) { + try (BufferedReader reader = FileSystemUtils.newBufferedReader(path.toUri().toURL(), StandardCharsets.UTF_8)) { return loadWordList(reader, "#"); + } catch (CharacterCodingException ex) { + String message = String.format(Locale.ROOT, + "Unsupported character encoding detected while reading %s_path: %s - files must be UTF-8 encoded", + settingPrefix, path.toString()); + throw new IllegalArgumentException(message, ex); } catch (IOException ioe) { - String message = String.format(Locale.ROOT, "IOException while reading %s_path: %s", settingPrefix); + String message = String.format(Locale.ROOT, "IOException while reading %s_path: %s", settingPrefix, path.toString()); throw new IllegalArgumentException(message, ioe); } } @@ -256,7 +264,7 @@ public class Analysis { } else { br = new BufferedReader(reader); } - String word = null; + String word; while ((word = br.readLine()) != null) { if (!Strings.hasText(word)) { continue; } @@ -283,13 +291,16 @@ public class Analysis { if (filePath == null) { return null; } - final Path path = env.configFile().resolve(filePath); - try { return FileSystemUtils.newBufferedReader(path.toUri().toURL(), StandardCharsets.UTF_8); + } catch (CharacterCodingException ex) { + String message = String.format(Locale.ROOT, + "Unsupported character encoding detected while reading %s_path: %s - files must be UTF-8 encoded", + settingPrefix, path.toString()); + throw new IllegalArgumentException(message, ex); } catch (IOException ioe) { - String message = String.format(Locale.ROOT, "IOException while reading %s_path: %s", settingPrefix); + String message = String.format(Locale.ROOT, "IOException while reading %s_path: %s", settingPrefix, path.toString()); throw new IllegalArgumentException(message, ioe); } } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index a8a7b4fe004..3c2d6bfb260 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -85,6 +85,10 @@ public final class AnalysisRegistry implements Closeable { this.analyzers = Collections.unmodifiableMap(analyzerBuilder); } + public HunspellService getHunspellService() { + return hunspellService; + } + /** * Returns a registered {@link TokenizerFactory} provider by name or null if the tokenizer was not registered */ diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java index 09e96f3743b..b9146df8c96 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java @@ -127,7 +127,7 @@ public class AnalysisService extends AbstractIndexComponent implements Closeable } if (analyzers.containsKey("default_index")) { final Version createdVersion = indexSettings.getIndexVersionCreated(); - if (createdVersion.onOrAfter(Version.V_5_0_0)) { + if (createdVersion.onOrAfter(Version.V_5_0_0_alpha1)) { throw new IllegalArgumentException("setting [index.analysis.analyzer.default_index] is not supported anymore, use [index.analysis.analyzer.default] instead for index [" + index().getName() + "]"); } else { deprecationLogger.deprecated("setting
[index.analysis.analyzer.default_index] is deprecated, use [index.analysis.analyzer.default] instead for index [{}]", index().getName()); @@ -155,7 +155,7 @@ public class AnalysisService extends AbstractIndexComponent implements Closeable // because analyzers are aliased, they might be closed several times // an NPE is thrown in this case, so ignore.... } catch (Exception e) { - logger.debug("failed to close analyzer " + analyzer); + logger.debug("failed to close analyzer {}", analyzer); } } } diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java b/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java index 25ff8f96834..1dd562c4bb1 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java @@ -93,7 +93,7 @@ public class NamedAnalyzer extends DelegatingAnalyzerWrapper { public String toString() { return "analyzer name[" + name + "], analyzer [" + analyzer + "]"; } - + /** It is an error if this is ever used, it means we screwed up! */ static final ReuseStrategy ERROR_STRATEGY = new Analyzer.ReuseStrategy() { @Override diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NumericDoubleAnalyzer.java b/core/src/main/java/org/elasticsearch/index/analysis/NumericDoubleAnalyzer.java index e90409421d2..77716e7a43d 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/NumericDoubleAnalyzer.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/NumericDoubleAnalyzer.java @@ -56,4 +56,4 @@ public class NumericDoubleAnalyzer extends NumericAnalyzer protected NumericFloatTokenizer createNumericTokenizer(char[] buffer) throws IOException { return new NumericFloatTokenizer(precisionStep, buffer); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NumericLongAnalyzer.java b/core/src/main/java/org/elasticsearch/index/analysis/NumericLongAnalyzer.java index ab112396392..9b865920341 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/NumericLongAnalyzer.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/NumericLongAnalyzer.java @@ -56,4 +56,4 @@ public class NumericLongAnalyzer extends NumericAnalyzer { protected NumericLongTokenizer createNumericTokenizer(char[] buffer) throws IOException { return new NumericLongTokenizer(precisionStep, buffer); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/index/cache/IndexCache.java b/core/src/main/java/org/elasticsearch/index/cache/IndexCache.java index 61733f24695..b41f5bc0125 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/IndexCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/IndexCache.java @@ -24,6 +24,7 @@ import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.cache.query.QueryCache; +import org.elasticsearch.index.percolator.PercolatorQueryCache; import java.io.Closeable; import java.io.IOException; @@ -35,11 +36,14 @@ public class IndexCache extends AbstractIndexComponent implements Closeable { private final QueryCache queryCache; private final BitsetFilterCache bitsetFilterCache; + private final PercolatorQueryCache percolatorQueryCache; - public IndexCache(IndexSettings indexSettings, QueryCache queryCache, BitsetFilterCache bitsetFilterCache) { + public IndexCache(IndexSettings indexSettings, QueryCache 
queryCache, BitsetFilterCache bitsetFilterCache, + PercolatorQueryCache percolatorQueryCache) { super(indexSettings); this.queryCache = queryCache; this.bitsetFilterCache = bitsetFilterCache; + this.percolatorQueryCache = percolatorQueryCache; } public QueryCache query() { @@ -53,9 +57,13 @@ public class IndexCache extends AbstractIndexComponent implements Closeable { return bitsetFilterCache; } + public PercolatorQueryCache getPercolatorQueryCache() { + return percolatorQueryCache; + } + @Override public void close() throws IOException { - IOUtils.close(queryCache, bitsetFilterCache); + IOUtils.close(queryCache, bitsetFilterCache, percolatorQueryCache); } public void clear(String reason) { diff --git a/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index f7802330ab7..2452e8147c2 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.cache.RemovalListener; import org.elasticsearch.common.cache.RemovalNotification; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; @@ -70,7 +71,8 @@ import java.util.concurrent.Executor; */ public final class BitsetFilterCache extends AbstractIndexComponent implements LeafReader.CoreClosedListener, RemovalListener>, Closeable { - public static final Setting INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING = Setting.boolSetting("index.load_fixed_bitset_filters_eagerly", true, false, Setting.Scope.INDEX); + public static final Setting INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING = + Setting.boolSetting("index.load_fixed_bitset_filters_eagerly", true, Property.IndexScope); private final boolean loadRandomAccessFiltersEagerly; private final Cache> loadedFilters; @@ -214,7 +216,7 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L } @Override - public IndexWarmer.TerminationHandle warmNewReaders(final IndexShard indexShard, final Engine.Searcher searcher) { + public IndexWarmer.TerminationHandle warmReader(final IndexShard indexShard, final Engine.Searcher searcher) { if (indexSettings.getIndex().equals(indexShard.indexSettings().getIndex()) == false) { // this is from a different index return TerminationHandle.NO_WAIT; @@ -266,11 +268,6 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L return () -> latch.await(); } - @Override - public TerminationHandle warmTopReader(IndexShard indexShard, Engine.Searcher searcher) { - return TerminationHandle.NO_WAIT; - } - } Cache> getLoadedFilters() { diff --git a/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java b/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java index 6dd710e4e89..965a2e58f9c 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java +++ b/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java @@ -129,9 +129,9 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler { merge.rateLimiter.getMBPerSec()); if 
(tookMS > 20000) { // if more than 20 seconds, DEBUG log it - logger.debug(message); + logger.debug("{}", message); } else if (logger.isTraceEnabled()) { - logger.trace(message); + logger.trace("{}", message); } } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 3c5583440e0..c66073bd91f 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -671,7 +671,7 @@ public abstract class Engine implements Closeable { closeNoLock("engine failed on: [" + reason + "]"); } finally { if (failedEngine != null) { - logger.debug("tried to fail engine but engine is already failed. ignoring. [{}]", reason, failure); + logger.debug("tried to fail engine but engine is already failed. ignoring. [{}]", failure, reason); return; } logger.warn("failed engine [{}]", failure, reason); @@ -697,7 +697,7 @@ public abstract class Engine implements Closeable { store.decRef(); } } else { - logger.debug("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", reason, failure); + logger.debug("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", failure, reason); } } @@ -1219,12 +1219,9 @@ public abstract class Engine implements Closeable { */ public interface Warmer { /** - * Called once a new Searcher is opened. - * @param searcher the searcer to warm - * @param isTopLevelReader true iff the searcher is build from a top-level reader. - * Otherwise the searcher might be build from a leaf reader to warm in isolation + * Called once a new top-level Searcher is opened. */ - void warm(Engine.Searcher searcher, boolean isTopLevelReader); + void warm(Engine.Searcher searcher); } /** diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 9740ccd0358..47001f40309 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -26,6 +26,7 @@ import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -39,7 +40,7 @@ import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.indices.IndexingMemoryController; import org.elasticsearch.threadpool.ThreadPool; -import java.util.Set; +import java.util.function.Function; /* * Holds all the configuration that is used to create an {@link Engine}. @@ -70,20 +71,23 @@ public final class EngineConfig { /** * Index setting to change the low level lucene codec used for writing new segments. * This setting is not realtime updateable. + * This setting can also be set at the node and the index level; it is commonly used in hot/cold node architectures where an index is + * likely allocated on both kinds of nodes.
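
The codec setting in the hunk that continues below now carries both Property.IndexScope and Property.NodeScope, so a node-level default can be overridden per index. A minimal standalone sketch of that validation and layering (hypothetical class and method names; Codec.availableCodecs() and the Settings.builder()/get() calls are the real APIs of this codebase's era, everything else is illustrative):

    import org.apache.lucene.codecs.Codec;
    import org.elasticsearch.common.settings.Settings;

    class CodecSettingSketch {
        // same validation as the lambda in the hunk below: known aliases pass
        // through, anything else must name a codec Lucene can actually load
        static String validateCodec(String s) {
            switch (s) {
                case "default":
                case "best_compression":
                case "lucene_default":
                    return s;
                default:
                    if (Codec.availableCodecs().contains(s) == false) {
                        throw new IllegalArgumentException(
                            "unknown value for [index.codec] must be one of [default, best_compression] but was: " + s);
                    }
                    return s;
            }
        }

        public static void main(String[] args) {
            // node-level default, e.g. from elasticsearch.yml on a cold node...
            Settings node = Settings.builder().put("index.codec", "best_compression").build();
            // ...which an individual index may override at creation time
            Settings index = Settings.builder().put("index.codec", "default").build();
            // per-index value wins, node value is the fallback
            System.out.println(validateCodec(index.get("index.codec", node.get("index.codec"))));
        }
    }
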
*/ - public static final Setting INDEX_CODEC_SETTING = new Setting<>("index.codec", "default", (s) -> { - switch(s) { + public static final Setting INDEX_CODEC_SETTING = new Setting<>("index.codec", "default", s -> { + switch (s) { case "default": case "best_compression": case "lucene_default": return s; default: if (Codec.availableCodecs().contains(s) == false) { // we don't error message the not officially supported ones - throw new IllegalArgumentException("unknown value for [index.codec] must be one of [default, best_compression] but was: " + s); + throw new IllegalArgumentException( + "unknown value for [index.codec] must be one of [default, best_compression] but was: " + s); } return s; } - }, false, Setting.Scope.INDEX); + }, Property.IndexScope, Property.NodeScope); /** if set to true the engine will start even if the translog id in the commit point can not be found */ public static final String INDEX_FORCE_NEW_TRANSLOG = "index.engine.force_new_translog"; @@ -98,12 +102,13 @@ public final class EngineConfig { IndexSettings indexSettings, Engine.Warmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy, MergePolicy mergePolicy,Analyzer analyzer, Similarity similarity, CodecService codecService, Engine.EventListener eventListener, - TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, TranslogConfig translogConfig, TimeValue flushMergesAfter) { + TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, + TranslogConfig translogConfig, TimeValue flushMergesAfter) { this.shardId = shardId; final Settings settings = indexSettings.getSettings(); this.indexSettings = indexSettings; this.threadPool = threadPool; - this.warmer = warmer == null ? (a,b) -> {} : warmer; + this.warmer = warmer == null ? (a) -> {} : warmer; this.store = store; this.deletionPolicy = deletionPolicy; this.mergePolicy = mergePolicy; @@ -139,7 +144,8 @@ public final class EngineConfig { } /** - * Returns the initial index buffer size. This setting is only read on startup and otherwise controlled by {@link IndexingMemoryController} + * Returns the initial index buffer size. This setting is only read on startup and otherwise controlled + * by {@link IndexingMemoryController} */ public ByteSizeValue getIndexingBufferSize() { return indexingBufferSize; @@ -147,11 +153,12 @@ public final class EngineConfig { /** * Returns true iff delete garbage collection in the engine should be enabled. This setting is updateable - * in realtime and forces a volatile read. Consumers can safely read this value directly go fetch it's latest value. The default is true + * in realtime and forces a volatile read. Consumers can safely read this value directly go fetch it's latest value. + * The default is true *
* Engine GC deletion if enabled collects deleted documents from in-memory realtime data structures after a certain amount of - time ({@link IndexSettings#getGcDeletesInMillis()} if enabled. Before deletes are GCed they will cause re-adding the document that was deleted - to fail. + time ({@link IndexSettings#getGcDeletesInMillis()}) if enabled. Before deletes are GCed they will cause re-adding the document + that was deleted to fail. *
*/ public boolean isEnableGcDeletes() { @@ -169,7 +176,8 @@ } /** - * Returns a thread-pool mainly used to get estimated time stamps from {@link org.elasticsearch.threadpool.ThreadPool#estimatedTimeInMillis()} and to schedule + * Returns a thread-pool mainly used to get estimated time stamps from + * {@link org.elasticsearch.threadpool.ThreadPool#estimatedTimeInMillis()} and to schedule * async force merge calls on the {@link org.elasticsearch.threadpool.ThreadPool.Names#FORCE_MERGE} thread-pool */ public ThreadPool getThreadPool() { @@ -184,8 +192,9 @@ } /** - * Returns the {@link org.elasticsearch.index.store.Store} instance that provides access to the {@link org.apache.lucene.store.Directory} - * used for the engines {@link org.apache.lucene.index.IndexWriter} to write it's index files to. + * Returns the {@link org.elasticsearch.index.store.Store} instance that provides access to the + * {@link org.apache.lucene.store.Directory} used for the engine's {@link org.apache.lucene.index.IndexWriter} to write its index files + * to. *
* Note: In order to use this instance the consumer needs to increment the stores reference before it's used the first time and hold * it's reference until it's not needed anymore. diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 01f02025aeb..aa62f255bb4 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -24,13 +24,10 @@ import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriter.IndexReaderWarmer; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReader; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.LiveIndexWriterConfig; import org.apache.lucene.index.MergePolicy; -import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.Term; @@ -51,7 +48,6 @@ import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.LoggerInfoStream; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; -import org.elasticsearch.common.lucene.index.ElasticsearchLeafReader; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.math.MathUtils; import org.elasticsearch.common.unit.ByteSizeValue; @@ -70,7 +66,6 @@ import org.elasticsearch.index.translog.TranslogCorruptedException; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; @@ -91,7 +86,6 @@ public class InternalEngine extends Engine { */ private volatile long lastDeleteVersionPruneTimeMSec; - private final Engine.Warmer warmer; private final Translog translog; private final ElasticsearchConcurrentMergeScheduler mergeScheduler; @@ -131,7 +125,6 @@ public class InternalEngine extends Engine { boolean success = false; try { this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().estimatedTimeInMillis(); - this.warmer = engineConfig.getWarmer(); mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings()); this.dirtyLocks = new Object[Runtime.getRuntime().availableProcessors() * 10]; // we multiply it to have enough... for (int i = 0; i < dirtyLocks.length; i++) { @@ -743,7 +736,9 @@ public class InternalEngine extends Engine { indexWriter.forceMerge(maxNumSegments, true /* blocks and waits for merges*/); } if (flush) { - flush(true, true); + if (tryRenewSyncCommit() == false) { + flush(false, true); + } } if (upgrade) { logger.info("finished segment upgrade"); @@ -929,30 +924,6 @@ public class InternalEngine extends Engine { iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().mbFrac()); iwc.setCodec(engineConfig.getCodec()); iwc.setUseCompoundFile(true); // always use compound on flush - reduces # of file-handles on refresh - // Warm-up hook for newly-merged segments. 
Warming up segments here is better since it will be performed at the end - // of the merge operation and won't slow down _refresh - iwc.setMergedSegmentWarmer(new IndexReaderWarmer() { - @Override - public void warm(LeafReader reader) throws IOException { - try { - LeafReader esLeafReader = new ElasticsearchLeafReader(reader, shardId); - assert isMergedSegment(esLeafReader); - if (warmer != null) { - final Engine.Searcher searcher = new Searcher("warmer", searcherFactory.newSearcher(esLeafReader, null)); - warmer.warm(searcher, false); - } - } catch (Throwable t) { - // Don't fail a merge if the warm-up failed - if (isClosed.get() == false) { - logger.warn("Warm-up failed", t); - } - if (t instanceof Error) { - // assertion/out-of-memory error, don't ignore those - throw (Error) t; - } - } - } - }); return new IndexWriter(store.directory(), iwc); } catch (LockObtainFailedException ex) { logger.warn("could not lock IndexWriter", ex); @@ -963,14 +934,12 @@ public class InternalEngine extends Engine { /** Extended SearcherFactory that warms the segments if needed when acquiring a new searcher */ final static class SearchFactory extends EngineSearcherFactory { private final Engine.Warmer warmer; - private final ShardId shardId; private final ESLogger logger; private final AtomicBoolean isEngineClosed; SearchFactory(ESLogger logger, AtomicBoolean isEngineClosed, EngineConfig engineConfig) { super(engineConfig); warmer = engineConfig.getWarmer(); - shardId = engineConfig.getShardId(); this.logger = logger; this.isEngineClosed = isEngineClosed; } @@ -985,55 +954,13 @@ public class InternalEngine extends Engine { return searcher; } if (warmer != null) { - // we need to pass a custom searcher that does not release anything on Engine.Search Release, - // we will release explicitly - IndexSearcher newSearcher = null; - boolean closeNewSearcher = false; try { - if (previousReader == null) { - // we are starting up - no writer active so we can't acquire a searcher. - newSearcher = searcher; - } else { - // figure out the newSearcher, with only the new readers that are relevant for us - List readers = new ArrayList<>(); - for (LeafReaderContext newReaderContext : reader.leaves()) { - if (isMergedSegment(newReaderContext.reader())) { - // merged segments are already handled by IndexWriterConfig.setMergedSegmentWarmer - continue; - } - boolean found = false; - for (LeafReaderContext currentReaderContext : previousReader.leaves()) { - if (currentReaderContext.reader().getCoreCacheKey().equals(newReaderContext.reader().getCoreCacheKey())) { - found = true; - break; - } - } - if (!found) { - readers.add(newReaderContext.reader()); - } - } - if (!readers.isEmpty()) { - // we don't want to close the inner readers, just increase ref on them - IndexReader newReader = new MultiReader(readers.toArray(new IndexReader[readers.size()]), false); - newSearcher = super.newSearcher(newReader, null); - closeNewSearcher = true; - } - } - - if (newSearcher != null) { - warmer.warm(new Searcher("new_reader_warming", newSearcher), false); - } assert searcher.getIndexReader() instanceof ElasticsearchDirectoryReader : "this class needs an ElasticsearchDirectoryReader but got: " + searcher.getIndexReader().getClass(); - warmer.warm(new Searcher("top_reader_warming", searcher), true); + warmer.warm(new Searcher("top_reader_warming", searcher)); } catch (Throwable e) { if (isEngineClosed.get() == false) { logger.warn("failed to prepare/warm", e); } - } finally { - // no need to release the fullSearcher, nothing really is done... 
- if (newSearcher != null && closeNewSearcher) { - IOUtils.closeWhileHandlingException(newSearcher.getIndexReader()); // ignore - } } } return searcher; diff --git a/core/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java b/core/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java index d2c4a3c14c0..10948f35d54 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java +++ b/core/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java @@ -348,7 +348,7 @@ public class SegmentsStats implements Streamable, ToXContent { indexWriterMaxMemoryInBytes = in.readLong(); bitsetMemoryInBytes = in.readLong(); - if (in.getVersion().onOrAfter(Version.V_5_0_0)) { + if (in.getVersion().onOrAfter(Version.V_5_0_0_alpha1)) { int size = in.readVInt(); ImmutableOpenMap.Builder map = ImmutableOpenMap.builder(size); for (int i = 0; i < size; i++) { @@ -376,7 +376,7 @@ public class SegmentsStats implements Streamable, ToXContent { out.writeLong(indexWriterMaxMemoryInBytes); out.writeLong(bitsetMemoryInBytes); - if (out.getVersion().onOrAfter(Version.V_5_0_0)) { + if (out.getVersion().onOrAfter(Version.V_5_0_0_alpha1)) { out.writeVInt(fileSizes.size()); for (Iterator> it = fileSizes.iterator(); it.hasNext();) { ObjectObjectCursor entry = it.next(); diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/FieldDataType.java b/core/src/main/java/org/elasticsearch/index/fielddata/FieldDataType.java deleted file mode 100644 index 371b802dc0c..00000000000 --- a/core/src/main/java/org/elasticsearch/index/fielddata/FieldDataType.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.fielddata; - -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.mapper.MappedFieldType.Loading; - -/** - */ -public class FieldDataType { - - public static final String FORMAT_KEY = "format"; - public static final String DOC_VALUES_FORMAT_VALUE = "doc_values"; - - private final String type; - private final String typeFormat; - private final Loading loading; - private final Settings settings; - - public FieldDataType(String type) { - this(type, Settings.Builder.EMPTY_SETTINGS); - } - - public FieldDataType(String type, Settings.Builder builder) { - this(type, builder.build()); - } - - public FieldDataType(String type, Settings settings) { - this.type = type; - this.typeFormat = "index.fielddata.type." + type + "." 
+ FORMAT_KEY; - this.settings = settings; - final String loading = settings.get(Loading.KEY); - this.loading = Loading.parse(loading, Loading.LAZY); - } - - public String getType() { - return this.type; - } - - public Settings getSettings() { - return this.settings; - } - - public Loading getLoading() { - return loading; - } - - public String getFormat(Settings indexSettings) { - String format = settings.get(FORMAT_KEY); - if (format == null && indexSettings != null) { - format = indexSettings.get(typeFormat); - } - return format; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - FieldDataType that = (FieldDataType) o; - - if (!settings.equals(that.settings)) return false; - if (!type.equals(that.type)) return false; - - return true; - } - - @Override - public int hashCode() { - int result = type.hashCode(); - result = 31 * result + settings.hashCode(); - return result; - } -} diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java index ffa23bf56e4..feacfe59996 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java @@ -20,10 +20,14 @@ package org.elasticsearch.index.fielddata; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparatorSource; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.SortField; import org.apache.lucene.search.Weight; @@ -63,18 +67,6 @@ public interface IndexFieldData extends IndexCompone return null; } } - - /** - * Gets a memory storage hint that should be honored if possible but is not mandatory - */ - public static MemoryStorageFormat getMemoryStorageHint(FieldDataType fieldDataType) { - // backwards compatibility - String s = fieldDataType.getSettings().get("ordinals"); - if (s != null) { - return "always".equals(s) ? MemoryStorageFormat.ORDINALS : null; - } - return MemoryStorageFormat.fromString(fieldDataType.getSettings().get(SETTING_MEMORY_STORAGE_HINT)); - } } /** @@ -82,11 +74,6 @@ public interface IndexFieldData extends IndexCompone */ String getFieldName(); - /** - * The field data type. - */ - FieldDataType getFieldDataType(); - /** * Loads the atomic field data for the reader, possibly cached. */ @@ -122,11 +109,11 @@ public interface IndexFieldData extends IndexCompone public static class Nested { private final BitSetProducer rootFilter; - private final Weight innerFilter; + private final Query innerQuery; - public Nested(BitSetProducer rootFilter, Weight innerFilter) { + public Nested(BitSetProducer rootFilter, Query innerQuery) { this.rootFilter = rootFilter; - this.innerFilter = innerFilter; + this.innerQuery = innerQuery; } /** @@ -140,7 +127,10 @@ public interface IndexFieldData extends IndexCompone * Get a {@link DocIdSet} that matches the inner documents. 
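
Since Nested now stores a Query instead of a pre-built Weight, the hunk that follows rebuilds the Weight on demand. The same Lucene pattern as a self-contained sketch (the helper class and method framing are assumed; the individual calls match the replacement body in the hunk):

    import java.io.IOException;
    import org.apache.lucene.index.IndexReaderContext;
    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.index.ReaderUtil;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.Scorer;
    import org.apache.lucene.search.Weight;

    final class InnerDocsSketch {
        // returns an iterator over the inner (nested) documents of one segment
        static DocIdSetIterator innerDocs(LeafReaderContext ctx, Query innerQuery) throws IOException {
            // the Weight must be created against the top-level context, not a leaf
            final IndexReaderContext topLevelCtx = ReaderUtil.getTopLevelContext(ctx);
            IndexSearcher indexSearcher = new IndexSearcher(topLevelCtx);
            Weight weight = indexSearcher.createNormalizedWeight(innerQuery, false); // false: scores not needed
            Scorer s = weight.scorer(ctx);
            return s == null ? null : s.iterator();
        }
    }

Holding a Query rather than a Weight presumably keeps Nested reusable across searchers, at the cost of re-creating the Weight on each call.
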
*/ public DocIdSetIterator innerDocs(LeafReaderContext ctx) throws IOException { - Scorer s = innerFilter.scorer(ctx); + final IndexReaderContext topLevelCtx = ReaderUtil.getTopLevelContext(ctx); + IndexSearcher indexSearcher = new IndexSearcher(topLevelCtx); + Weight weight = indexSearcher.createNormalizedWeight(innerQuery, false); + Scorer s = weight.scorer(ctx); return s == null ? null : s.iterator(); } } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataCache.java b/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataCache.java index 68c05aba3fe..948b19a8afb 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataCache.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataCache.java @@ -48,12 +48,12 @@ public interface IndexFieldDataCache { /** * Called after the fielddata is loaded during the cache phase */ - default void onCache(ShardId shardId, String fieldName, FieldDataType fieldDataType, Accountable ramUsage){} + default void onCache(ShardId shardId, String fieldName, Accountable ramUsage){} /** * Called after the fielddata is unloaded */ - default void onRemoval(ShardId shardId, String fieldName, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes){} + default void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes){} } class None implements IndexFieldDataCache { diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java b/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java index 94e9edc5b94..30e1086e187 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java @@ -24,6 +24,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.plain.AbstractGeoPointDVIndexFieldData; @@ -67,118 +68,21 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo default: throw new IllegalArgumentException("failed to parse [" + s + "] must be one of [node,node]"); } - }, false, Setting.Scope.INDEX); + }, Property.IndexScope); - private static final IndexFieldData.Builder MISSING_DOC_VALUES_BUILDER = (indexProperties, fieldType, cache, breakerService, mapperService1) -> { - throw new IllegalStateException("Can't load fielddata on [" + fieldType.name() - + "] of index [" + indexProperties.getIndex().getName() + "] because fielddata is unsupported on fields of type [" - + fieldType.fieldDataType().getType() + "]. 
Use doc values instead."); - }; - - private static final String ARRAY_FORMAT = "array"; - private static final String DISABLED_FORMAT = "disabled"; - private static final String DOC_VALUES_FORMAT = "doc_values"; - private static final String PAGED_BYTES_FORMAT = "paged_bytes"; - - private static final IndexFieldData.Builder DISABLED_BUILDER = new IndexFieldData.Builder() { - @Override - public IndexFieldData build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache, - CircuitBreakerService breakerService, MapperService mapperService) { - throw new IllegalStateException("Field data loading is forbidden on [" + fieldType.name() + "]"); - } - }; - - private final static Map buildersByType; - private final static Map docValuesBuildersByType; - private final static Map, IndexFieldData.Builder> buildersByTypeAndFormat; private final CircuitBreakerService circuitBreakerService; - static { - Map buildersByTypeBuilder = new HashMap<>(); - buildersByTypeBuilder.put("string", new PagedBytesIndexFieldData.Builder()); - buildersByTypeBuilder.put(TextFieldMapper.CONTENT_TYPE, new PagedBytesIndexFieldData.Builder()); - buildersByTypeBuilder.put(KeywordFieldMapper.CONTENT_TYPE, MISSING_DOC_VALUES_BUILDER); - buildersByTypeBuilder.put("float", MISSING_DOC_VALUES_BUILDER); - buildersByTypeBuilder.put("double", MISSING_DOC_VALUES_BUILDER); - buildersByTypeBuilder.put("byte", MISSING_DOC_VALUES_BUILDER); - buildersByTypeBuilder.put("short", MISSING_DOC_VALUES_BUILDER); - buildersByTypeBuilder.put("int", MISSING_DOC_VALUES_BUILDER); - buildersByTypeBuilder.put("long", MISSING_DOC_VALUES_BUILDER); - buildersByTypeBuilder.put("geo_point", new GeoPointArrayIndexFieldData.Builder()); - buildersByTypeBuilder.put(ParentFieldMapper.NAME, new ParentChildIndexFieldData.Builder()); - buildersByTypeBuilder.put(IndexFieldMapper.NAME, new IndexIndexFieldData.Builder()); - buildersByTypeBuilder.put("binary", DISABLED_BUILDER); - buildersByTypeBuilder.put(BooleanFieldMapper.CONTENT_TYPE, MISSING_DOC_VALUES_BUILDER); - buildersByType = unmodifiableMap(buildersByTypeBuilder); - - - docValuesBuildersByType = MapBuilder.newMapBuilder() - .put("string", new DocValuesIndexFieldData.Builder()) - .put(KeywordFieldMapper.CONTENT_TYPE, new DocValuesIndexFieldData.Builder()) - .put("float", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.FLOAT)) - .put("double", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.DOUBLE)) - .put("byte", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.BYTE)) - .put("short", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.SHORT)) - .put("int", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.INT)) - .put("long", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.LONG)) - .put("geo_point", new AbstractGeoPointDVIndexFieldData.Builder()) - .put("binary", new BytesBinaryDVIndexFieldData.Builder()) - .put(BooleanFieldMapper.CONTENT_TYPE, new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.BOOLEAN)) - .immutableMap(); - - buildersByTypeAndFormat = MapBuilder., IndexFieldData.Builder>newMapBuilder() - .put(Tuple.tuple("string", PAGED_BYTES_FORMAT), new PagedBytesIndexFieldData.Builder()) - .put(Tuple.tuple("string", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder()) - .put(Tuple.tuple("string", DISABLED_FORMAT), DISABLED_BUILDER) - - 
.put(Tuple.tuple(TextFieldMapper.CONTENT_TYPE, PAGED_BYTES_FORMAT), new PagedBytesIndexFieldData.Builder()) - .put(Tuple.tuple(TextFieldMapper.CONTENT_TYPE, DISABLED_FORMAT), DISABLED_BUILDER) - - .put(Tuple.tuple(KeywordFieldMapper.CONTENT_TYPE, DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder()) - .put(Tuple.tuple(KeywordFieldMapper.CONTENT_TYPE, DISABLED_FORMAT), DISABLED_BUILDER) - - .put(Tuple.tuple("float", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.FLOAT)) - .put(Tuple.tuple("float", DISABLED_FORMAT), DISABLED_BUILDER) - - .put(Tuple.tuple("double", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.DOUBLE)) - .put(Tuple.tuple("double", DISABLED_FORMAT), DISABLED_BUILDER) - - .put(Tuple.tuple("byte", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.BYTE)) - .put(Tuple.tuple("byte", DISABLED_FORMAT), DISABLED_BUILDER) - - .put(Tuple.tuple("short", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.SHORT)) - .put(Tuple.tuple("short", DISABLED_FORMAT), DISABLED_BUILDER) - - .put(Tuple.tuple("int", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.INT)) - .put(Tuple.tuple("int", DISABLED_FORMAT), DISABLED_BUILDER) - - .put(Tuple.tuple("long", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.LONG)) - .put(Tuple.tuple("long", DISABLED_FORMAT), DISABLED_BUILDER) - - .put(Tuple.tuple("geo_point", ARRAY_FORMAT), new GeoPointArrayIndexFieldData.Builder()) - .put(Tuple.tuple("geo_point", DOC_VALUES_FORMAT), new AbstractGeoPointDVIndexFieldData.Builder()) - .put(Tuple.tuple("geo_point", DISABLED_FORMAT), DISABLED_BUILDER) - - .put(Tuple.tuple("binary", DOC_VALUES_FORMAT), new BytesBinaryDVIndexFieldData.Builder()) - .put(Tuple.tuple("binary", DISABLED_FORMAT), DISABLED_BUILDER) - - .put(Tuple.tuple(BooleanFieldMapper.CONTENT_TYPE, DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.BOOLEAN)) - .put(Tuple.tuple(BooleanFieldMapper.CONTENT_TYPE, DISABLED_FORMAT), DISABLED_BUILDER) - - .immutableMap(); - } - private final IndicesFieldDataCache indicesFieldDataCache; // the below map needs to be modified under a lock private final Map fieldDataCaches = new HashMap<>(); private final MapperService mapperService; private static final IndexFieldDataCache.Listener DEFAULT_NOOP_LISTENER = new IndexFieldDataCache.Listener() { @Override - public void onCache(ShardId shardId, String fieldName, FieldDataType fieldDataType, Accountable ramUsage) { + public void onCache(ShardId shardId, String fieldName, Accountable ramUsage) { } @Override - public void onRemoval(ShardId shardId, String fieldName, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes) { + public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) { } }; private volatile IndexFieldDataCache.Listener listener = DEFAULT_NOOP_LISTENER; @@ -222,42 +126,15 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo @SuppressWarnings("unchecked") public > IFD getForField(MappedFieldType fieldType) { final String fieldName = fieldType.name(); - final FieldDataType type = fieldType.fieldDataType(); - if (type == null) { - throw new IllegalArgumentException("found no fielddata type for field [" + 
fieldName + "]"); - } - final boolean docValues = fieldType.hasDocValues(); - IndexFieldData.Builder builder = null; - String format = type.getFormat(indexSettings.getSettings()); - if (format != null && FieldDataType.DOC_VALUES_FORMAT_VALUE.equals(format) && !docValues) { - logger.warn("field [" + fieldName + "] has no doc values, will use default field data format"); - format = null; - } - if (format != null) { - builder = buildersByTypeAndFormat.get(Tuple.tuple(type.getType(), format)); - if (builder == null) { - logger.warn("failed to find format [" + format + "] for field [" + fieldName + "], will use default"); - } - } - if (builder == null && docValues) { - builder = docValuesBuildersByType.get(type.getType()); - } - if (builder == null) { - builder = buildersByType.get(type.getType()); - } - if (builder == null) { - throw new IllegalArgumentException("failed to find field data builder for field " + fieldName + ", and type " + type.getType()); - } + IndexFieldData.Builder builder = fieldType.fielddataBuilder(); IndexFieldDataCache cache; synchronized (this) { cache = fieldDataCaches.get(fieldName); if (cache == null) { - // we default to node level cache, which in turn defaults to be unbounded - // this means changing the node level settings is simple, just set the bounds there - String cacheType = type.getSettings().get("cache", indexSettings.getValue(INDEX_FIELDDATA_CACHE_KEY)); + String cacheType = indexSettings.getValue(INDEX_FIELDDATA_CACHE_KEY); if (FIELDDATA_CACHE_VALUE_NODE.equals(cacheType)) { - cache = indicesFieldDataCache.buildIndexFieldDataCache(listener, index(), fieldName, type); + cache = indicesFieldDataCache.buildIndexFieldDataCache(listener, index(), fieldName); } else if ("none".equals(cacheType)){ cache = new IndexFieldDataCache.None(); } else { diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java index bb31df75348..9e21562e8c7 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/ShardFieldData.java @@ -51,7 +51,7 @@ public class ShardFieldData implements IndexFieldDataCache.Listener { } @Override - public void onCache(ShardId shardId, String fieldName, FieldDataType fieldDataType, Accountable ramUsage) { + public void onCache(ShardId shardId, String fieldName, Accountable ramUsage) { totalMetric.inc(ramUsage.ramBytesUsed()); CounterMetric total = perFieldTotals.get(fieldName); if (total != null) { @@ -67,7 +67,7 @@ public class ShardFieldData implements IndexFieldDataCache.Listener { } @Override - public void onRemoval(ShardId shardId, String fieldName, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes) { + public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) { if (wasEvicted) { evictionsMetric.inc(); } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java index dc5041d24ef..e69ba4f4d0b 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java @@ -70,7 +70,7 @@ public enum GlobalOrdinalsBuilder { ); } return new InternalGlobalOrdinalsIndexFieldData(indexSettings, indexFieldData.getFieldName(), - indexFieldData.getFieldDataType(), 
atomicFD, ordinalMap, memorySizeInBytes + atomicFD, ordinalMap, memorySizeInBytes ); } @@ -104,7 +104,7 @@ public enum GlobalOrdinalsBuilder { } final OrdinalMap ordinalMap = OrdinalMap.build(null, subs, PackedInts.DEFAULT); return new InternalGlobalOrdinalsIndexFieldData(indexSettings, indexFieldData.getFieldName(), - indexFieldData.getFieldDataType(), atomicFD, ordinalMap, 0 + atomicFD, ordinalMap, 0 ); } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsIndexFieldData.java index 5e1a2b57401..3e756204002 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsIndexFieldData.java @@ -25,11 +25,9 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData; -import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; -import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.search.MultiValueMode; import java.util.Collection; @@ -41,13 +39,11 @@ import java.util.Collections; public abstract class GlobalOrdinalsIndexFieldData extends AbstractIndexComponent implements IndexOrdinalsFieldData, Accountable { private final String fieldName; - private final FieldDataType fieldDataType; private final long memorySizeInBytes; - protected GlobalOrdinalsIndexFieldData(IndexSettings indexSettings, String fieldName, FieldDataType fieldDataType, long memorySizeInBytes) { + protected GlobalOrdinalsIndexFieldData(IndexSettings indexSettings, String fieldName, long memorySizeInBytes) { super(indexSettings); this.fieldName = fieldName; - this.fieldDataType = fieldDataType; this.memorySizeInBytes = memorySizeInBytes; } @@ -71,11 +67,6 @@ public abstract class GlobalOrdinalsIndexFieldData extends AbstractIndexComponen return fieldName; } - @Override - public FieldDataType getFieldDataType() { - return fieldDataType; - } - @Override public XFieldComparatorSource comparatorSource(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested) { throw new UnsupportedOperationException("no global ordinals sorting yet"); diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/InternalGlobalOrdinalsIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/InternalGlobalOrdinalsIndexFieldData.java index 297c8b0f30c..5b8ef83b10e 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/InternalGlobalOrdinalsIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/InternalGlobalOrdinalsIndexFieldData.java @@ -24,9 +24,7 @@ import org.apache.lucene.index.RandomAccessOrds; import org.apache.lucene.util.Accountable; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData; -import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.plain.AbstractAtomicOrdinalsFieldData; -import org.elasticsearch.index.mapper.MappedFieldType; import java.util.Collection; @@ -37,8 +35,8 @@ final class 
InternalGlobalOrdinalsIndexFieldData extends GlobalOrdinalsIndexFiel private final Atomic[] atomicReaders; - InternalGlobalOrdinalsIndexFieldData(IndexSettings indexSettings, String fieldName, FieldDataType fieldDataType, AtomicOrdinalsFieldData[] segmentAfd, OrdinalMap ordinalMap, long memorySizeInBytes) { - super(indexSettings, fieldName, fieldDataType, memorySizeInBytes); + InternalGlobalOrdinalsIndexFieldData(IndexSettings indexSettings, String fieldName, AtomicOrdinalsFieldData[] segmentAfd, OrdinalMap ordinalMap, long memorySizeInBytes) { + super(indexSettings, fieldName, memorySizeInBytes); this.atomicReaders = new Atomic[segmentAfd.length]; for (int i = 0; i < segmentAfd.length; i++) { atomicReaders[i] = new Atomic(segmentAfd[i], ordinalMap, i); diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java index 2b69afa5f82..e3dc84a3477 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java @@ -35,7 +35,6 @@ import org.apache.lucene.util.LongsRef; import org.apache.lucene.util.packed.GrowableWriter; import org.apache.lucene.util.packed.PackedInts; import org.apache.lucene.util.packed.PagedGrowableWriter; -import org.elasticsearch.common.settings.Settings; import java.io.Closeable; import java.io.IOException; @@ -287,20 +286,13 @@ public final class OrdinalsBuilder implements Closeable { private OrdinalsStore ordinals; private final LongsRef spare; - public OrdinalsBuilder(long numTerms, int maxDoc, float acceptableOverheadRatio) throws IOException { + public OrdinalsBuilder(int maxDoc, float acceptableOverheadRatio) throws IOException { this.maxDoc = maxDoc; int startBitsPerValue = 8; - if (numTerms >= 0) { - startBitsPerValue = PackedInts.bitsRequired(numTerms); - } ordinals = new OrdinalsStore(maxDoc, startBitsPerValue, acceptableOverheadRatio); spare = new LongsRef(); } - public OrdinalsBuilder(int maxDoc, float acceptableOverheadRatio) throws IOException { - this(-1, maxDoc, acceptableOverheadRatio); - } - public OrdinalsBuilder(int maxDoc) throws IOException { this(maxDoc, DEFAULT_ACCEPTABLE_OVERHEAD_RATIO); } @@ -413,10 +405,9 @@ public final class OrdinalsBuilder implements Closeable { /** * Builds an {@link Ordinals} instance from the builders current state. 
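
For context on the build() signature change in the next hunk, a sketch of the builder's lifecycle as the fielddata loaders elsewhere in this diff drive it; the nextOrdinal/addDoc feeding calls are assumptions about the builder API and are not shown in this excerpt:

    // hypothetical loader fragment; OrdinalsBuilder, Ordinals and RandomAccessOrds
    // are the classes touched by this diff, the feeding calls are assumed API
    static RandomAccessOrds loadOrdinals(LeafReader reader, TermsEnum termsEnum) throws IOException {
        try (OrdinalsBuilder builder = new OrdinalsBuilder(reader.maxDoc())) {
            for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
                builder.nextOrdinal(); // assumed: allocate the ordinal for this term
                PostingsEnum docs = termsEnum.postings(null, PostingsEnum.NONE);
                for (int doc = docs.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docs.nextDoc()) {
                    builder.addDoc(doc); // assumed: record that this doc has the term
                }
            }
            Ordinals ordinals = builder.build(); // the Settings parameter is gone
            return ordinals.ordinals();
        }
    }
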
*/ - public Ordinals build(Settings settings) { - final float acceptableOverheadRatio = settings.getAsFloat("acceptable_overhead_ratio", PackedInts.FASTEST); - final boolean forceMultiOrdinals = settings.getAsBoolean(FORCE_MULTI_ORDINALS, false); - if (forceMultiOrdinals || numMultiValuedDocs > 0 || MultiOrdinals.significantlySmallerThanSinglePackedOrdinals(maxDoc, numDocsWithValue, getValueCount(), acceptableOverheadRatio)) { + public Ordinals build() { + final float acceptableOverheadRatio = PackedInts.DEFAULT; + if (numMultiValuedDocs > 0 || MultiOrdinals.significantlySmallerThanSinglePackedOrdinals(maxDoc, numDocsWithValue, getValueCount(), acceptableOverheadRatio)) { // MultiOrdinals can be smaller than SinglePackedOrdinals for sparse fields return new MultiOrdinals(this, acceptableOverheadRatio); } else { diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractGeoPointDVIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractGeoPointDVIndexFieldData.java index 3d4b6536b6c..23e770121a7 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractGeoPointDVIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractGeoPointDVIndexFieldData.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.AtomicGeoPointFieldData; -import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.fielddata.IndexFieldDataCache; @@ -40,8 +39,8 @@ import java.io.IOException; public abstract class AbstractGeoPointDVIndexFieldData extends DocValuesIndexFieldData implements IndexGeoPointFieldData { - AbstractGeoPointDVIndexFieldData(Index index, String fieldName, FieldDataType fieldDataType) { - super(index, fieldName, fieldDataType); + AbstractGeoPointDVIndexFieldData(Index index, String fieldName) { + super(index, fieldName); } @Override @@ -55,8 +54,8 @@ public abstract class AbstractGeoPointDVIndexFieldData extends DocValuesIndexFie public static class GeoPointDVIndexFieldData extends AbstractGeoPointDVIndexFieldData { final boolean indexCreatedBefore2x; - public GeoPointDVIndexFieldData(Index index, String fieldName, FieldDataType fieldDataType, final boolean indexCreatedBefore2x) { - super(index, fieldName, fieldDataType); + public GeoPointDVIndexFieldData(Index index, String fieldName, final boolean indexCreatedBefore2x) { + super(index, fieldName); this.indexCreatedBefore2x = indexCreatedBefore2x; } @@ -82,8 +81,12 @@ public abstract class AbstractGeoPointDVIndexFieldData extends DocValuesIndexFie @Override public IndexFieldData build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache, CircuitBreakerService breakerService, MapperService mapperService) { + if (indexSettings.getIndexVersionCreated().before(Version.V_2_2_0) + && fieldType.hasDocValues() == false) { + return new GeoPointArrayIndexFieldData(indexSettings, fieldType.name(), cache, breakerService); + } // Ignore breaker - return new GeoPointDVIndexFieldData(indexSettings.getIndex(), fieldType.name(), fieldType.fieldDataType(), + return new GeoPointDVIndexFieldData(indexSettings.getIndex(), fieldType.name(), indexSettings.getIndexVersionCreated().before(Version.V_2_2_0)); } } diff --git 
a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexFieldData.java index 151ee92058d..604e866de02 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexFieldData.java @@ -27,7 +27,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.AtomicFieldData; -import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.RamAccountingTermsEnum; @@ -39,13 +38,11 @@ import java.io.IOException; public abstract class AbstractIndexFieldData extends AbstractIndexComponent implements IndexFieldData { private final String fieldName; - protected final FieldDataType fieldDataType; protected final IndexFieldDataCache cache; - public AbstractIndexFieldData(IndexSettings indexSettings, String fieldName, FieldDataType fieldDataType, IndexFieldDataCache cache) { + public AbstractIndexFieldData(IndexSettings indexSettings, String fieldName, IndexFieldDataCache cache) { super(indexSettings); this.fieldName = fieldName; - this.fieldDataType = fieldDataType; this.cache = cache; } @@ -54,11 +51,6 @@ public abstract class AbstractIndexFieldData extends return this.fieldName; } - @Override - public FieldDataType getFieldDataType() { - return fieldDataType; - } - @Override public void clear() { cache.clear(fieldName); diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java index 2c41dece3de..4e0eb218663 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.AtomicGeoPointFieldData; -import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; @@ -102,8 +101,8 @@ abstract class AbstractIndexGeoPointFieldData extends AbstractIndexFieldData implements IndexOrdinalsFieldData { - protected Settings frequency; - protected Settings regex; + private final double minFrequency, maxFrequency; + private final int minSegmentSize; protected final CircuitBreakerService breakerService; - protected AbstractIndexOrdinalsFieldData(IndexSettings indexSettings, String fieldName, FieldDataType fieldDataType, - IndexFieldDataCache cache, CircuitBreakerService breakerService) { - super(indexSettings, fieldName, fieldDataType, cache); - final Map groups = fieldDataType.getSettings().getGroups("filter"); - frequency = groups.get("frequency"); - regex = groups.get("regex"); + protected AbstractIndexOrdinalsFieldData(IndexSettings indexSettings, String fieldName, + IndexFieldDataCache cache, CircuitBreakerService breakerService, + 
double minFrequency, double maxFrequency, int minSegmentSize) { + super(indexSettings, fieldName, cache); this.breakerService = breakerService; + this.minFrequency = minFrequency; + this.maxFrequency = maxFrequency; + this.minSegmentSize = minSegmentSize; } @Override @@ -110,17 +105,24 @@ public abstract class AbstractIndexOrdinalsFieldData extends AbstractIndexFieldD return AbstractAtomicOrdinalsFieldData.empty(); } - protected TermsEnum filter(Terms terms, LeafReader reader) throws IOException { - TermsEnum iterator = terms.iterator(); + protected TermsEnum filter(Terms terms, TermsEnum iterator, LeafReader reader) throws IOException { if (iterator == null) { return null; } - if (iterator != null && frequency != null) { - iterator = FrequencyFilter.filter(iterator, terms, reader, frequency); + int docCount = terms.getDocCount(); + if (docCount == -1) { + docCount = reader.maxDoc(); } - - if (iterator != null && regex != null) { - iterator = RegexFilter.filter(iterator, terms, reader, regex); + if (docCount >= minSegmentSize) { + final int minFreq = minFrequency > 1.0 + ? (int) minFrequency + : (int)(docCount * minFrequency); + final int maxFreq = maxFrequency > 1.0 + ? (int) maxFrequency + : (int)(docCount * maxFrequency); + if (minFreq > 1 || maxFreq < docCount) { + iterator = new FrequencyFilter(iterator, minFreq, maxFreq); + } } return iterator; } @@ -135,25 +137,6 @@ public abstract class AbstractIndexOrdinalsFieldData extends AbstractIndexFieldD this.maxFreq = maxFreq; } - public static TermsEnum filter(TermsEnum toFilter, Terms terms, LeafReader reader, Settings settings) throws IOException { - int docCount = terms.getDocCount(); - if (docCount == -1) { - docCount = reader.maxDoc(); - } - final double minFrequency = settings.getAsDouble("min", 0d); - final double maxFrequency = settings.getAsDouble("max", docCount+1d); - final double minSegmentSize = settings.getAsInt("min_segment_size", 0); - if (minSegmentSize < docCount) { - final int minFreq = minFrequency > 1.0? (int) minFrequency : (int)(docCount * minFrequency); - final int maxFreq = maxFrequency > 1.0? 
(int) maxFrequency : (int)(docCount * maxFrequency); - assert minFreq < maxFreq; - return new FrequencyFilter(toFilter, minFreq, maxFreq); - } - - return toFilter; - - } - @Override protected AcceptStatus accept(BytesRef arg0) throws IOException { int docFreq = docFreq(); @@ -164,33 +147,4 @@ public abstract class AbstractIndexOrdinalsFieldData extends AbstractIndexFieldD } } - private static final class RegexFilter extends FilteredTermsEnum { - - private final Matcher matcher; - private final CharsRefBuilder spare = new CharsRefBuilder(); - - public RegexFilter(TermsEnum delegate, Matcher matcher) { - super(delegate, false); - this.matcher = matcher; - } - public static TermsEnum filter(TermsEnum iterator, Terms terms, LeafReader reader, Settings regex) { - String pattern = regex.get("pattern"); - if (pattern == null) { - return iterator; - } - Pattern p = Pattern.compile(pattern); - return new RegexFilter(iterator, p.matcher("")); - } - - @Override - protected AcceptStatus accept(BytesRef arg0) throws IOException { - spare.copyUTF8Bytes(arg0); - matcher.reset(spare.get()); - if (matcher.matches()) { - return AcceptStatus.YES; - } - return AcceptStatus.NO; - } - } - } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVIndexFieldData.java index c2a50942566..586ad1f0d48 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVIndexFieldData.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.fielddata.plain; import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.index.Index; -import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; @@ -29,8 +28,8 @@ import org.elasticsearch.search.MultiValueMode; public class BinaryDVIndexFieldData extends DocValuesIndexFieldData implements IndexFieldData { - public BinaryDVIndexFieldData(Index index, String fieldName, FieldDataType fieldDataType) { - super(index, fieldName, fieldDataType); + public BinaryDVIndexFieldData(Index index, String fieldName) { + super(index, fieldName); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/BytesBinaryDVIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/BytesBinaryDVIndexFieldData.java index 988ecd61d65..bd3cdd71184 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/BytesBinaryDVIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/BytesBinaryDVIndexFieldData.java @@ -24,7 +24,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.common.Nullable; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.fielddata.IndexFieldDataCache; @@ -37,8 +36,8 @@ import java.io.IOException; public class BytesBinaryDVIndexFieldData extends DocValuesIndexFieldData implements IndexFieldData { - public BytesBinaryDVIndexFieldData(Index index, String fieldName, 
FieldDataType fieldDataType) { - super(index, fieldName, fieldDataType); + public BytesBinaryDVIndexFieldData(Index index, String fieldName) { + super(index, fieldName); } @Override @@ -67,7 +66,7 @@ public class BytesBinaryDVIndexFieldData extends DocValuesIndexFieldData impleme CircuitBreakerService breakerService, MapperService mapperService) { // Ignore breaker final String fieldName = fieldType.name(); - return new BytesBinaryDVIndexFieldData(indexSettings.getIndex(), fieldName, fieldType.fieldDataType()); + return new BytesBinaryDVIndexFieldData(indexSettings.getIndex(), fieldName); } } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java index 27db531a218..111595859a9 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; @@ -46,14 +45,12 @@ public abstract class DocValuesIndexFieldData { protected final Index index; protected final String fieldName; - protected final FieldDataType fieldDataType; protected final ESLogger logger; - public DocValuesIndexFieldData(Index index, String fieldName, FieldDataType fieldDataType) { + public DocValuesIndexFieldData(Index index, String fieldName) { super(); this.index = index; this.fieldName = fieldName; - this.fieldDataType = fieldDataType; this.logger = Loggers.getLogger(getClass()); } @@ -61,10 +58,6 @@ public abstract class DocValuesIndexFieldData { return fieldName; } - public final FieldDataType getFieldDataType() { - return fieldDataType; - } - public final void clear() { // can't do } @@ -92,19 +85,13 @@ public abstract class DocValuesIndexFieldData { CircuitBreakerService breakerService, MapperService mapperService) { // Ignore Circuit Breaker final String fieldName = fieldType.name(); - final Settings fdSettings = fieldType.fieldDataType().getSettings(); - final Map filter = fdSettings.getGroups("filter"); - if (filter != null && !filter.isEmpty()) { - throw new IllegalArgumentException("Doc values field data doesn't support filters [" + fieldName + "]"); - } - if (BINARY_INDEX_FIELD_NAMES.contains(fieldName)) { assert numericType == null; - return new BinaryDVIndexFieldData(indexSettings.getIndex(), fieldName, fieldType.fieldDataType()); + return new BinaryDVIndexFieldData(indexSettings.getIndex(), fieldName); } else if (numericType != null) { - return new SortedNumericDVIndexFieldData(indexSettings.getIndex(), fieldName, numericType, fieldType.fieldDataType()); + return new SortedNumericDVIndexFieldData(indexSettings.getIndex(), fieldName, numericType); } else { - return new SortedSetDVOrdinalsIndexFieldData(indexSettings, cache, fieldName, breakerService, fieldType.fieldDataType()); + return new SortedSetDVOrdinalsIndexFieldData(indexSettings, cache, fieldName, breakerService); } } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayIndexFieldData.java 
b/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayIndexFieldData.java index e5724453eee..d484c503c2b 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointArrayIndexFieldData.java @@ -35,7 +35,6 @@ import org.elasticsearch.common.util.LongArray; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.AtomicGeoPointFieldData; import org.elasticsearch.index.fielddata.FieldData; -import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.ordinals.Ordinals; @@ -50,18 +49,9 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData { private final CircuitBreakerService breakerService; - public static class Builder implements IndexFieldData.Builder { - @Override - public IndexFieldData build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache, - CircuitBreakerService breakerService, MapperService mapperService) { - return new GeoPointArrayIndexFieldData(indexSettings, fieldType.name(), fieldType.fieldDataType(), cache, - breakerService); - } - } - public GeoPointArrayIndexFieldData(IndexSettings indexSettings, String fieldName, - FieldDataType fieldDataType, IndexFieldDataCache cache, CircuitBreakerService breakerService) { - super(indexSettings, fieldName, fieldDataType, cache); + IndexFieldDataCache cache, CircuitBreakerService breakerService) { + super(indexSettings, fieldName, cache); this.breakerService = breakerService; } @@ -88,8 +78,7 @@ public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData private AtomicGeoPointFieldData loadFieldData22(LeafReader reader, NonEstimatingEstimator estimator, Terms terms, AtomicGeoPointFieldData data) throws Exception { LongArray indexedPoints = BigArrays.NON_RECYCLING_INSTANCE.newLongArray(128); - final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat("acceptable_transient_overhead_ratio", - OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO); + final float acceptableTransientOverheadRatio = OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO; boolean success = false; try (OrdinalsBuilder builder = new OrdinalsBuilder(reader.maxDoc(), acceptableTransientOverheadRatio)) { final TermsEnum termsEnum; @@ -112,10 +101,9 @@ public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData } indexedPoints = BigArrays.NON_RECYCLING_INSTANCE.resize(indexedPoints, numTerms); - Ordinals build = builder.build(fieldDataType.getSettings()); + Ordinals build = builder.build(); RandomAccessOrds ordinals = build.ordinals(); - if (!(FieldData.isMultiValued(ordinals) || CommonSettings.getMemoryStorageHint(fieldDataType) == CommonSettings - .MemoryStorageFormat.ORDINALS)) { + if (FieldData.isMultiValued(ordinals) == false) { int maxDoc = reader.maxDoc(); LongArray sIndexedPoint = BigArrays.NON_RECYCLING_INSTANCE.newLongArray(reader.maxDoc()); for (int i=0; i parentTypes = new HashSet<>(); for (DocumentMapper mapper : mapperService.docMappers(false)) { @@ -146,7 +145,7 @@ public class ParentChildIndexFieldData extends AbstractIndexFieldData> FieldStats get(String field) throws IOException { + MappedFieldType mappedFieldType = mapperService.fullName(field); + if 
(mappedFieldType != null) { + IndexReader reader = searcher.reader(); + Terms terms = MultiFields.getTerms(reader, field); + if (terms != null) { + return mappedFieldType.stats(terms, reader.maxDoc()); + } + } + return null; + } + + /** + * @param fieldName + * the fieldName to check + * @param from + * the minimum value for the query + * @param to + * the maximum value for the query + * @param includeLower + * whether the from value is inclusive + * @param includeUpper + * whether the to value is inclusive + * @param timeZone + * the timeZone to use for date fields + * @param dateMathParser + * the {@link DateMathParser} to use for date fields + * @return A {@link Relation} indicating the overlap of the range of terms + * for the field with the query range. This method will return: + *
+ * <ul>
+ * <li>{@link Relation#WITHIN} if the range of terms for the field
+ * in the shard is completely within the query range</li>
+ * <li>{@link Relation#DISJOINT} if the range of terms for the field
+ * in the shard is completely outside the query range</li>
+ * <li>{@link Relation#INTERSECTS} if the range of terms for the
+ * field in the shard intersects with the query range</li>
+ * </ul>
+ * @throws IOException + * if the index cannot be read + */ + public Relation isFieldWithinQuery(String fieldName, Object from, Object to, boolean includeLower, boolean includeUpper, + DateTimeZone timeZone, DateMathParser dateMathParser) throws IOException { + MappedFieldType mappedFieldType = mapperService.fullName(fieldName); + FieldStats fieldStats = get(fieldName); + if (fieldStats == null) { + // No fieldStats for the field so the field doesn't exist on + // this shard, so relation is DISJOINT + return Relation.DISJOINT; + } else { + // Convert the from and to values to Strings so they can be used + // in the IndexConstraints. Since DateTime is represented as a + // Long field in Lucene we need to use the millisecond value of + // the DateTime in that case + String fromString = null; + if (from != null) { + if (mappedFieldType instanceof DateFieldType) { + long millis = ((DateFieldType) mappedFieldType).parseToMilliseconds(from, !includeLower, timeZone, dateMathParser); + fromString = fieldStats.stringValueOf(millis, null); + } else if (mappedFieldType instanceof IpFieldType) { + if (from instanceof BytesRef) { + from = ((BytesRef) from).utf8ToString(); + } + long ipAsLong = ((IpFieldType) mappedFieldType).value(from); + fromString = fieldStats.stringValueOf(ipAsLong, null); + } else { + fromString = fieldStats.stringValueOf(from, null); + } + } + String toString = null; + if (to != null) { + if (mappedFieldType instanceof DateFieldType) { + long millis = ((DateFieldType) mappedFieldType).parseToMilliseconds(to, includeUpper, timeZone, dateMathParser); + toString = fieldStats.stringValueOf(millis, null); + } else if (mappedFieldType instanceof IpFieldType) { + if (to instanceof BytesRef) { + to = ((BytesRef) to).utf8ToString(); + } + long ipAsLong = ((IpFieldType) mappedFieldType).value(to); + toString = fieldStats.stringValueOf(ipAsLong, null); + } else { + toString = fieldStats.stringValueOf(to, null); + } + } + if ((from == null || fieldStats + .match(new IndexConstraint(fieldName, Property.MIN, includeLower ? Comparison.GTE : Comparison.GT, fromString))) + && (to == null || fieldStats.match( + new IndexConstraint(fieldName, Property.MAX, includeUpper ? Comparison.LTE : Comparison.LT, toString)))) { + // If the min and max terms for the field are both within + // the query range then all documents will match so relation is + // WITHIN + return Relation.WITHIN; + } else if ((to != null && fieldStats + .match(new IndexConstraint(fieldName, Property.MIN, includeUpper ? Comparison.GT : Comparison.GTE, toString))) + || (from != null && fieldStats.match( + new IndexConstraint(fieldName, Property.MAX, includeLower ? Comparison.LT : Comparison.LTE, fromString)))) { + // If the min and max terms are both outside the query range + // then no document will match so relation is DISJOINT (N.B. 
+ // since from <= to we only need + // to check one bound for each side of the query range) + return Relation.DISJOINT; + } + } + // Range of terms doesn't match any of the constraints so must INTERSECT + return Relation.INTERSECTS; + } + + /** + * An enum used to describe the relation between the range of terms in a + * shard when compared with a query range + */ + public static enum Relation { + WITHIN, INTERSECTS, DISJOINT; + } +} diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java index b1f6f7cd9bd..e2c897226c1 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java @@ -25,8 +25,6 @@ import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; @@ -140,19 +138,19 @@ public class DocumentMapperParser { return docBuilder.build(mapperService); } - public static void checkNoRemainingFields(String fieldName, Map<String, Object> fieldNodeMap, Version indexVersionCreated) { + public static void checkNoRemainingFields(String fieldName, Map<?, ?> fieldNodeMap, Version indexVersionCreated) { checkNoRemainingFields(fieldNodeMap, indexVersionCreated, "Mapping definition for [" + fieldName + "] has unsupported parameters: "); } - public static void checkNoRemainingFields(Map<String, Object> fieldNodeMap, Version indexVersionCreated, String message) { + public static void checkNoRemainingFields(Map<?, ?> fieldNodeMap, Version indexVersionCreated, String message) { if (!fieldNodeMap.isEmpty()) { throw new MapperParsingException(message + getRemainingFields(fieldNodeMap)); } } - private static String getRemainingFields(Map<String, Object> map) { + private static String getRemainingFields(Map<?, ?> map) { StringBuilder remainingFields = new StringBuilder(); - for (String key : map.keySet()) { + for (Object key : map.keySet()) { remainingFields.append(" [").append(key).append(" : ").append(map.get(key)).append("]"); } return remainingFields.toString(); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 2694b64783b..0c9f2daa6cb 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -19,8 +19,14 @@ package org.elasticsearch.index.mapper; +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; + import org.apache.lucene.document.Field; -import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.CloseableThreadLocal; import org.elasticsearch.common.Strings; @@ -48,15 +54,8 @@ import org.elasticsearch.index.mapper.object.ArrayValueMapperParser; import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.mapper.object.RootObjectMapper; -import java.io.Closeable; -import java.io.IOException; -import java.util.Collections; -import
java.util.HashSet; -import java.util.List; -import java.util.Set; - /** A parser for documents, given mappings from a DocumentMapper */ -class DocumentParser implements Closeable { +final class DocumentParser implements Closeable { private CloseableThreadLocal cache = new CloseableThreadLocal() { @Override @@ -99,7 +98,7 @@ class DocumentParser implements Closeable { reverseOrder(context); - ParsedDocument doc = parsedDocument(source, context, update(context, mapping)); + ParsedDocument doc = parsedDocument(source, context, createDynamicUpdate(mapping, docMapper, context.getDynamicMappers())); // reset the context to free up memory context.reset(null, null, null); return doc; @@ -116,10 +115,7 @@ class DocumentParser implements Closeable { // entire type is disabled parser.skipChildren(); } else if (emptyDoc == false) { - Mapper update = parseObject(context, mapping.root, true); - if (update != null) { - context.addDynamicMappingsUpdate(update); - } + parseObjectOrNested(context, mapping.root, true); } for (MetadataFieldMapper metadataMapper : mapping.metadataMappers) { @@ -201,11 +197,6 @@ class DocumentParser implements Closeable { } - private static Mapping update(ParseContext.InternalParseContext context, Mapping mapping) { - Mapper rootDynamicUpdate = context.dynamicMappingsUpdate(); - return rootDynamicUpdate != null ? mapping.mappingUpdate(rootDynamicUpdate) : null; - } - private static MapperParsingException wrapInMapperParsingException(SourceToParse source, Throwable e) { // if its already a mapper parsing exception, no need to wrap it... if (e instanceof MapperParsingException) { @@ -220,10 +211,156 @@ class DocumentParser implements Closeable { return new MapperParsingException("failed to parse", e); } - static ObjectMapper parseObject(ParseContext context, ObjectMapper mapper, boolean atRoot) throws IOException { + /** Creates a Mapping containing any dynamically added fields, or returns null if there were no dynamic mappings. */ + static Mapping createDynamicUpdate(Mapping mapping, DocumentMapper docMapper, List dynamicMappers) { + if (dynamicMappers.isEmpty()) { + return null; + } + // We build a mapping by first sorting the mappers, so that all mappers containing a common prefix + // will be processed in a contiguous block. When the prefix is no longer seen, we pop the extra elements + // off the stack, merging them upwards into the existing mappers. + Collections.sort(dynamicMappers, (Mapper o1, Mapper o2) -> o1.name().compareTo(o2.name())); + Iterator dynamicMapperItr = dynamicMappers.iterator(); + List parentMappers = new ArrayList<>(); + Mapper firstUpdate = dynamicMapperItr.next(); + parentMappers.add(createUpdate(mapping.root(), firstUpdate.name().split("\\."), 0, firstUpdate)); + Mapper previousMapper = null; + while (dynamicMapperItr.hasNext()) { + Mapper newMapper = dynamicMapperItr.next(); + if (previousMapper != null && newMapper.name().equals(previousMapper.name())) { + // We can see the same mapper more than once, for example, if we had foo.bar and foo.baz, where + // foo did not yet exist. This will create 2 copies in dynamic mappings, which should be identical. + // Here we just skip over the duplicates, but we merge them to ensure there are no conflicts. 
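The sort-then-fold walk described in the comments above is easier to see outside the mapper machinery. Below is a minimal, self-contained sketch of the same idea, under the assumption of plain dotted field names instead of real Mapper objects (PrefixStackDemo and commonPrefix are illustrative stand-ins, not Elasticsearch code): sorting puts every name that shares a dotted prefix into one contiguous run, so a single stack of in-progress parents suffices, and exact duplicates are skipped once seen.

```java
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Deque;
import java.util.List;

public class PrefixStackDemo {

    public static void main(String[] args) {
        List<String> names = new ArrayList<>(Arrays.asList("a.d", "a.b.c", "a.b.c", "b.x"));
        Collections.sort(names); // a.b.c, a.b.c, a.d, b.x -- shared prefixes are now contiguous
        Deque<String> parents = new ArrayDeque<>();
        String previous = null;
        for (String name : names) {
            if (name.equals(previous)) {
                continue; // exact duplicate (the real code merges the two copies first)
            }
            previous = name;
            String[] parts = name.split("\\.");
            // pop parents that are no longer a prefix of the current name,
            // "merging" each one back into the parent below it
            while (parents.size() > commonPrefix(parents, parts)) {
                System.out.println("merge " + parents.removeLast() + " into its parent");
            }
            // push the intermediate objects introduced by this name
            for (int i = parents.size(); i < parts.length - 1; i++) {
                parents.addLast(parts[i]);
            }
            System.out.println("add leaf '" + parts[parts.length - 1] + "' under " + parents);
        }
        while (parents.isEmpty() == false) {
            System.out.println("merge " + parents.removeLast() + " into its parent");
        }
    }

    /** Number of leading stack entries that match the object path of the new name. */
    private static int commonPrefix(Deque<String> parents, String[] parts) {
        int i = 0;
        for (String parent : parents) {
            if (i >= parts.length - 1 || parent.equals(parts[i]) == false) {
                break;
            }
            i++;
        }
        return i;
    }
}
```

Running the sketch prints the leaf additions and the merges performed as the stack unwinds, which mirrors how popMappers folds children back into their parents above.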
+ newMapper.merge(previousMapper, false); + continue; + } + previousMapper = newMapper; + String[] nameParts = newMapper.name().split("\\."); + + // We first need the stack to only contain mappers in common with the previously processed mapper + // For example, if the first mapper processed was a.b.c, and we now have a.d, the stack will contain + // a.b, and we want to merge b back into the stack so it just contains a + int i = removeUncommonMappers(parentMappers, nameParts); + + // Then we need to add back mappers that may already exist within the stack, but are not on it. + // For example, if we processed a.b, followed by an object mapper a.c.d, and now are adding a.c.d.e + // then the stack will only have a on it because we will have already merged a.c.d into the stack. + // So we need to pull a.c, followed by a.c.d, onto the stack so e can be added to the end. + i = expandCommonMappers(parentMappers, nameParts, i); + + // If there are still parents of the new mapper which are not on the stack, we need to pull them + // from the existing mappings. In order to maintain the invariant that the stack only contains + // fields which are updated, we cannot simply add the existing mappers to the stack, since they + // may have other subfields which will not be updated. Instead, we pull the mapper from the existing + // mappings, and build an update with only the new mapper and its parents. This then becomes our + // "new mapper", and can be added to the stack. + if (i < nameParts.length - 1) { + newMapper = createExistingMapperUpdate(parentMappers, nameParts, i, docMapper, newMapper); + } + + if (newMapper instanceof ObjectMapper) { + parentMappers.add((ObjectMapper)newMapper); + } else { + addToLastMapper(parentMappers, newMapper, true); + } + } + popMappers(parentMappers, 1, true); + assert parentMappers.size() == 1; + + return mapping.mappingUpdate(parentMappers.get(0)); + } + + private static void popMappers(List parentMappers, int keepBefore, boolean merge) { + assert keepBefore >= 1; // never remove the root mapper + // pop off parent mappers not needed by the current mapper, + // merging them backwards since they are immutable + for (int i = parentMappers.size() - 1; i >= keepBefore; --i) { + addToLastMapper(parentMappers, parentMappers.remove(i), merge); + } + } + + /** + * Adds a mapper as an update into the last mapper. If merge is true, the new mapper + * will be merged in with other child mappers of the last parent, otherwise it will be a new update. + */ + private static void addToLastMapper(List parentMappers, Mapper mapper, boolean merge) { + assert parentMappers.size() >= 1; + int lastIndex = parentMappers.size() - 1; + ObjectMapper withNewMapper = parentMappers.get(lastIndex).mappingUpdate(mapper); + if (merge) { + withNewMapper = parentMappers.get(lastIndex).merge(withNewMapper, false); + } + parentMappers.set(lastIndex, withNewMapper); + } + + /** + * Removes mappers that exist on the stack, but are not part of the path of the current nameParts, + * Returns the next unprocessed index from nameParts. + */ + private static int removeUncommonMappers(List parentMappers, String[] nameParts) { + int keepBefore = 1; + while (keepBefore < parentMappers.size() && + parentMappers.get(keepBefore).simpleName().equals(nameParts[keepBefore - 1])) { + ++keepBefore; + } + popMappers(parentMappers, keepBefore, true); + return keepBefore - 1; + } + + /** + * Adds mappers from the end of the stack that exist as updates within those mappers. 
+ * Returns the next unprocessed index from nameParts. + */ + private static int expandCommonMappers(List parentMappers, String[] nameParts, int i) { + ObjectMapper last = parentMappers.get(parentMappers.size() - 1); + while (i < nameParts.length - 1 && last.getMapper(nameParts[i]) != null) { + Mapper newLast = last.getMapper(nameParts[i]); + assert newLast instanceof ObjectMapper; + last = (ObjectMapper) newLast; + parentMappers.add(last); + ++i; + } + return i; + } + + /** Creates an update for intermediate object mappers that are not on the stack, but parents of newMapper. */ + private static ObjectMapper createExistingMapperUpdate(List parentMappers, String[] nameParts, int i, + DocumentMapper docMapper, Mapper newMapper) { + String updateParentName = nameParts[i]; + final ObjectMapper lastParent = parentMappers.get(parentMappers.size() - 1); + if (parentMappers.size() > 1) { + // only prefix with parent mapper if the parent mapper isn't the root (which has a fake name) + updateParentName = lastParent.name() + '.' + nameParts[i]; + } + ObjectMapper updateParent = docMapper.objectMappers().get(updateParentName); + assert updateParent != null : updateParentName + " doesn't exist"; + return createUpdate(updateParent, nameParts, i + 1, newMapper); + } + + /** Build an update for the parent which will contain the given mapper and any intermediate fields. */ + private static ObjectMapper createUpdate(ObjectMapper parent, String[] nameParts, int i, Mapper mapper) { + List parentMappers = new ArrayList<>(); + ObjectMapper previousIntermediate = parent; + for (; i < nameParts.length - 1; ++i) { + Mapper intermediate = previousIntermediate.getMapper(nameParts[i]); + assert intermediate != null : "Field " + previousIntermediate.name() + " does not have a subfield " + nameParts[i]; + assert intermediate instanceof ObjectMapper; + parentMappers.add((ObjectMapper)intermediate); + previousIntermediate = (ObjectMapper)intermediate; + } + if (parentMappers.isEmpty() == false) { + // add the new mapper to the stack, and pop down to the original parent level + addToLastMapper(parentMappers, mapper, false); + popMappers(parentMappers, 1, false); + mapper = parentMappers.get(0); + } + return parent.mappingUpdate(mapper); + } + + static void parseObjectOrNested(ParseContext context, ObjectMapper mapper, boolean atRoot) throws IOException { if (mapper.isEnabled() == false) { context.parser().skipChildren(); - return null; + return; } XContentParser parser = context.parser(); @@ -234,7 +371,7 @@ class DocumentParser implements Closeable { XContentParser.Token token = parser.currentToken(); if (token == XContentParser.Token.VALUE_NULL) { // the object is null ("obj1" : null), simply bail - return null; + return; } if (token.isValue()) { @@ -256,21 +393,19 @@ class DocumentParser implements Closeable { } ObjectMapper update = null; - update = innerParseObject(context, mapper, parser, currentFieldName, token, update); + innerParseObject(context, mapper, parser, currentFieldName, token); // restore the enable path flag if (nested.isNested()) { nested(context, nested); } - return update; } - private static ObjectMapper innerParseObject(ParseContext context, ObjectMapper mapper, XContentParser parser, String currentFieldName, XContentParser.Token token, ObjectMapper update) throws IOException { + private static void innerParseObject(ParseContext context, ObjectMapper mapper, XContentParser parser, String currentFieldName, XContentParser.Token token) throws IOException { while (token != 
XContentParser.Token.END_OBJECT) { - ObjectMapper newUpdate = null; if (token == XContentParser.Token.START_OBJECT) { - newUpdate = parseObject(context, mapper, currentFieldName); + parseObject(context, mapper, currentFieldName); } else if (token == XContentParser.Token.START_ARRAY) { - newUpdate = parseArray(context, mapper, currentFieldName); + parseArray(context, mapper, currentFieldName); } else if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_NULL) { @@ -278,18 +413,10 @@ class DocumentParser implements Closeable { } else if (token == null) { throw new MapperParsingException("object mapping for [" + mapper.name() + "] tried to parse field [" + currentFieldName + "] as object, but got EOF, has a concrete value been provided to it?"); } else if (token.isValue()) { - newUpdate = parseValue(context, mapper, currentFieldName, token); + parseValue(context, mapper, currentFieldName, token); } token = parser.nextToken(); - if (newUpdate != null) { - if (update == null) { - update = newUpdate; - } else { - update = update.merge(newUpdate, false); - } - } } - return update; } private static void nested(ParseContext context, ObjectMapper.Nested nested) { @@ -335,33 +462,29 @@ class DocumentParser implements Closeable { return context; } - private static Mapper parseObjectOrField(ParseContext context, Mapper mapper) throws IOException { + private static void parseObjectOrField(ParseContext context, Mapper mapper) throws IOException { if (mapper instanceof ObjectMapper) { - return parseObject(context, (ObjectMapper) mapper, false); + parseObjectOrNested(context, (ObjectMapper) mapper, false); } else { FieldMapper fieldMapper = (FieldMapper)mapper; Mapper update = fieldMapper.parse(context); + if (update != null) { + context.addDynamicMapper(update); + } if (fieldMapper.copyTo() != null) { parseCopyFields(context, fieldMapper, fieldMapper.copyTo().copyToFields()); } - return update; } } private static ObjectMapper parseObject(final ParseContext context, ObjectMapper mapper, String currentFieldName) throws IOException { - if (currentFieldName == null) { - throw new MapperParsingException("object mapping [" + mapper.name() + "] trying to serialize an object with no field associated with it, current value [" + context.parser().textOrNull() + "]"); - } + assert currentFieldName != null; context.path().add(currentFieldName); ObjectMapper update = null; Mapper objectMapper = mapper.getMapper(currentFieldName); if (objectMapper != null) { - final Mapper subUpdate = parseObjectOrField(context, objectMapper); - if (subUpdate != null) { - // propagate mapping update - update = mapper.mappingUpdate(subUpdate); - } + parseObjectOrField(context, objectMapper); } else { ObjectMapper.Dynamic dynamic = mapper.dynamic(); if (dynamic == null) { @@ -382,8 +505,9 @@ class DocumentParser implements Closeable { } Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); objectMapper = builder.build(builderContext); + context.addDynamicMapper(objectMapper); context.path().add(currentFieldName); - update = mapper.mappingUpdate(parseAndMergeUpdate(objectMapper, context)); + parseObjectOrField(context, objectMapper); } else { // not dynamic, read everything up to end object context.parser().skipChildren(); @@ -394,7 +518,7 @@ class DocumentParser implements Closeable { return update; } - private static ObjectMapper parseArray(ParseContext context, ObjectMapper parentMapper, String 
lastFieldName) throws IOException { + private static void parseArray(ParseContext context, ObjectMapper parentMapper, String lastFieldName) throws IOException { String arrayFieldName = lastFieldName; Mapper mapper = parentMapper.getMapper(lastFieldName); if (mapper != null) { @@ -402,15 +526,9 @@ class DocumentParser implements Closeable { // expects an array, if so we pass the context straight to the mapper and if not // we serialize the array components if (mapper instanceof ArrayValueMapperParser) { - final Mapper subUpdate = parseObjectOrField(context, mapper); - if (subUpdate != null) { - // propagate the mapping update - return parentMapper.mappingUpdate(subUpdate); - } else { - return null; - } + parseObjectOrField(context, mapper); } else { - return parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); } } else { @@ -423,31 +541,34 @@ class DocumentParser implements Closeable { } else if (dynamic == ObjectMapper.Dynamic.TRUE) { Mapper.Builder builder = context.root().findTemplateBuilder(context, arrayFieldName, "object"); if (builder == null) { - return parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + return; } Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); mapper = builder.build(builderContext); - if (mapper != null && mapper instanceof ArrayValueMapperParser) { + assert mapper != null; + if (mapper instanceof ArrayValueMapperParser) { + context.addDynamicMapper(mapper); context.path().add(arrayFieldName); - mapper = parseAndMergeUpdate(mapper, context); - return parentMapper.mappingUpdate(mapper); + parseObjectOrField(context, mapper); } else { - return parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); } } else { - return parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + // TODO: shouldn't this skip, not parse? 
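Stepping back from this hunk: the recurring change throughout DocumentParser is that the parse methods stop returning ObjectMapper updates that every caller had to merge and propagate, and instead return void while recording dynamically created mappers on the context. Here is a minimal sketch of that collector pattern, with CollectingContext and ParserSketch as hypothetical stand-ins for ParseContext and the parser (not the actual classes):

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Stand-in for ParseContext: accumulates mappers created while parsing.
final class CollectingContext {
    private final List<String> dynamicMappers = new ArrayList<>();

    void addDynamicMapper(String mapper) {
        dynamicMappers.add(mapper);
    }

    List<String> getDynamicMappers() {
        return Collections.unmodifiableList(dynamicMappers);
    }
}

final class ParserSketch {
    // Parse methods become void: there is no update to return, merge, or propagate.
    static void parseValue(CollectingContext context, String fieldPath, boolean alreadyMapped) {
        if (alreadyMapped == false) {
            // Before: build an update and hand it back up the call stack.
            // After: record the new mapper; one combined update is built after parsing.
            context.addDynamicMapper(fieldPath);
        }
    }

    public static void main(String[] args) {
        CollectingContext context = new CollectingContext();
        parseValue(context, "user.name", false);
        parseValue(context, "user.age", false);
        parseValue(context, "message", true); // already mapped: nothing recorded
        // One fold at the end, mirroring createDynamicUpdate(...).
        System.out.println("dynamic mappers to fold into one update: " + context.getDynamicMappers());
    }
}
```

The design win is that merging happens once, in createDynamicUpdate, rather than at every level of the recursive descent.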
+ parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); } } } - private static ObjectMapper parseNonDynamicArray(ParseContext context, ObjectMapper mapper, String lastFieldName, String arrayFieldName) throws IOException { + private static void parseNonDynamicArray(ParseContext context, ObjectMapper mapper, String lastFieldName, String arrayFieldName) throws IOException { XContentParser parser = context.parser(); XContentParser.Token token; while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.START_OBJECT) { - return parseObject(context, mapper, lastFieldName); + parseObject(context, mapper, lastFieldName); } else if (token == XContentParser.Token.START_ARRAY) { - return parseArray(context, mapper, lastFieldName); + parseArray(context, mapper, lastFieldName); } else if (token == XContentParser.Token.FIELD_NAME) { lastFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_NULL) { @@ -455,25 +576,20 @@ class DocumentParser implements Closeable { } else if (token == null) { throw new MapperParsingException("object mapping for [" + mapper.name() + "] with array for [" + arrayFieldName + "] tried to parse as array, but got EOF, is there a mismatch in types for the same field?"); } else { - return parseValue(context, mapper, lastFieldName, token); + parseValue(context, mapper, lastFieldName, token); } } - return null; } - private static ObjectMapper parseValue(final ParseContext context, ObjectMapper parentMapper, String currentFieldName, XContentParser.Token token) throws IOException { + private static void parseValue(final ParseContext context, ObjectMapper parentMapper, String currentFieldName, XContentParser.Token token) throws IOException { if (currentFieldName == null) { throw new MapperParsingException("object mapping [" + parentMapper.name() + "] trying to serialize a value with no field associated with it, current value [" + context.parser().textOrNull() + "]"); } Mapper mapper = parentMapper.getMapper(currentFieldName); if (mapper != null) { - Mapper subUpdate = parseObjectOrField(context, mapper); - if (subUpdate == null) { - return null; - } - return parentMapper.mappingUpdate(subUpdate); + parseObjectOrField(context, mapper); } else { - return parseDynamicValue(context, parentMapper, currentFieldName, token); + parseDynamicValue(context, parentMapper, currentFieldName, token); } } @@ -589,7 +705,7 @@ class DocumentParser implements Closeable { Double.parseDouble(text); Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double"); if (builder == null) { - builder = new DoubleFieldMapper.Builder(currentFieldName); + builder = new FloatFieldMapper.Builder(currentFieldName); } return builder; } catch (NumberFormatException e) { @@ -641,7 +757,7 @@ class DocumentParser implements Closeable { throw new IllegalStateException("Can't handle serializing a dynamic type with content token [" + token + "] and field name [" + currentFieldName + "]"); } - private static ObjectMapper parseDynamicValue(final ParseContext context, ObjectMapper parentMapper, String currentFieldName, XContentParser.Token token) throws IOException { + private static void parseDynamicValue(final ParseContext context, ObjectMapper parentMapper, String currentFieldName, XContentParser.Token token) throws IOException { ObjectMapper.Dynamic dynamic = parentMapper.dynamic(); if (dynamic == null) { dynamic = dynamicOrDefault(context.root().dynamic()); @@ -650,7 +766,7 @@ class DocumentParser 
implements Closeable { throw new StrictDynamicMappingException(parentMapper.fullPath(), currentFieldName); } if (dynamic == ObjectMapper.Dynamic.FALSE) { - return null; + return; } final String path = context.path().pathAsText(currentFieldName); final Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); @@ -668,14 +784,9 @@ class DocumentParser implements Closeable { // try to not introduce a conflict mapper = mapper.updateFieldType(Collections.singletonMap(path, existingFieldType)); } + context.addDynamicMapper(mapper); - mapper = parseAndMergeUpdate(mapper, context); - - ObjectMapper update = null; - if (mapper != null) { - update = parentMapper.mappingUpdate(mapper); - } - return update; + parseObjectOrField(context, mapper); } /** Creates instances of the fields that the current field should be copied to */ @@ -713,8 +824,9 @@ class DocumentParser implements Closeable { // The path of the dest field might be completely different from the current one so we need to reset it context = context.overridePath(new ContentPath(0)); - String[] paths = Strings.splitStringToArray(field, '.'); - String fieldName = paths[paths.length-1]; + // TODO: why Strings.splitStringToArray instead of String.split? + final String[] paths = Strings.splitStringToArray(field, '.'); + final String fieldName = paths[paths.length-1]; ObjectMapper mapper = context.root(); ObjectMapper[] mappers = new ObjectMapper[paths.length-1]; if (paths.length > 1) { @@ -745,6 +857,7 @@ class DocumentParser implements Closeable { if (mapper.nested() != ObjectMapper.Nested.NO) { throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().pathAsText(paths[i]) + "]) through `copy_to`"); } + context.addDynamicMapper(mapper); break; case FALSE: // Maybe we should log something to tell the user that the copy_to is ignored in this case. @@ -759,36 +872,10 @@ class DocumentParser implements Closeable { parent = mapper; } } - ObjectMapper update = parseDynamicValue(context, mapper, fieldName, context.parser().currentToken()); - assert update != null; // we are parsing a dynamic value so we necessarily created a new mapping - - if (paths.length > 1) { - for (int i = paths.length - 2; i >= 0; i--) { - ObjectMapper parent = context.root(); - if (i > 0) { - parent = mappers[i-1]; - } - assert parent != null; - update = parent.mappingUpdate(update); - } - } - context.addDynamicMappingsUpdate(update); + parseDynamicValue(context, mapper, fieldName, context.parser().currentToken()); } } - /** - * Parse the given {@code context} with the given {@code mapper} and apply - * the potential mapping update in-place. This method is useful when - * composing mapping updates. - */ - private static M parseAndMergeUpdate(M mapper, ParseContext context) throws IOException { - final Mapper update = parseObjectOrField(context, mapper); - if (update != null) { - mapper = (M) mapper.merge(update, false); - } - return mapper; - } - private static ObjectMapper.Dynamic dynamicOrDefault(ObjectMapper.Dynamic dynamic) { return dynamic == null ? 
ObjectMapper.Dynamic.TRUE : dynamic; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index dba1355a395..8077a732142 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -25,14 +25,13 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; import org.elasticsearch.Version; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.analysis.NamedAnalyzer; -import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.core.TypeParsers; import org.elasticsearch.index.mapper.internal.AllFieldMapper; import org.elasticsearch.index.similarity.SimilarityProvider; @@ -49,8 +48,10 @@ import java.util.Map; import java.util.stream.StreamSupport; public abstract class FieldMapper extends Mapper implements Cloneable { - public static final Setting IGNORE_MALFORMED_SETTING = Setting.boolSetting("index.mapping.ignore_malformed", false, false, Setting.Scope.INDEX); - public static final Setting COERCE_SETTING = Setting.boolSetting("index.mapping.coerce", false, false, Setting.Scope.INDEX); + public static final Setting IGNORE_MALFORMED_SETTING = + Setting.boolSetting("index.mapping.ignore_malformed", false, Property.IndexScope); + public static final Setting COERCE_SETTING = + Setting.boolSetting("index.mapping.coerce", false, Property.IndexScope); public abstract static class Builder extends Mapper.Builder { protected final MappedFieldType fieldType; @@ -61,8 +62,6 @@ public abstract class FieldMapper extends Mapper implements Cloneable { protected Boolean includeInAll; protected boolean indexOptionsSet = false; protected boolean docValuesSet = false; - @Nullable - protected Settings fieldDataSettings; protected final MultiFields.Builder multiFieldsBuilder; protected CopyTo copyTo; @@ -200,16 +199,6 @@ public abstract class FieldMapper extends Mapper implements Cloneable { return builder; } - public T normsLoading(MappedFieldType.Loading normsLoading) { - this.fieldType.setNormsLoading(normsLoading); - return builder; - } - - public T fieldDataSettings(Settings settings) { - this.fieldDataSettings = settings; - return builder; - } - public Builder nullValue(Object nullValue) { this.fieldType.setNullValue(nullValue); return this; @@ -230,7 +219,7 @@ public abstract class FieldMapper extends Mapper implements Cloneable { } protected boolean defaultDocValues(Version indexCreated) { - if (indexCreated.onOrAfter(Version.V_5_0_0)) { + if (indexCreated.onOrAfter(Version.V_5_0_0_alpha1)) { // add doc values by default to keyword (boolean, numerics, etc.) 
fields return fieldType.tokenized() == false; } else { @@ -240,14 +229,13 @@ public abstract class FieldMapper extends Mapper implements Cloneable { protected void setupFieldType(BuilderContext context) { fieldType.setName(buildFullName(context)); + if (context.indexCreatedVersion().before(Version.V_5_0_0_alpha1)) { + fieldType.setOmitNorms(fieldType.omitNorms() && fieldType.boost() == 1.0f); + } if (fieldType.indexAnalyzer() == null && fieldType.tokenized() == false && fieldType.indexOptions() != IndexOptions.NONE) { fieldType.setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); fieldType.setSearchAnalyzer(Lucene.KEYWORD_ANALYZER); } - if (fieldDataSettings != null) { - Settings settings = Settings.builder().put(fieldType.fieldDataType().getSettings()).put(fieldDataSettings).build(); - fieldType.setFieldDataType(new FieldDataType(fieldType.fieldDataType().getType(), settings)); - } boolean defaultDocValues = defaultDocValues(context.indexCreatedVersion()); defaultFieldType.setHasDocValues(defaultDocValues); if (docValuesSet == false) { @@ -301,7 +289,7 @@ public abstract class FieldMapper extends Mapper implements Cloneable { if (!customBoost() // don't set boosts eg. on dv fields && field.fieldType().indexOptions() != IndexOptions.NONE - && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) { + && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha1)) { field.setBoost(fieldType().boost()); } context.doc().add(field); @@ -374,7 +362,8 @@ public abstract class FieldMapper extends Mapper implements Cloneable { // this can happen if this mapper represents a mapping update return this; } else if (fieldType.getClass() != newFieldType.getClass()) { - throw new IllegalStateException("Mixing up field types: " + fieldType.getClass() + " != " + newFieldType.getClass()); + throw new IllegalStateException("Mixing up field types: " + + fieldType.getClass() + " != " + newFieldType.getClass() + " on field " + fieldType.name()); } MultiFields updatedMultiFields = multiFields.updateFieldType(fullNameToFieldType); if (fieldType == newFieldType && multiFields == updatedMultiFields) { @@ -415,19 +404,15 @@ public abstract class FieldMapper extends Mapper implements Cloneable { if (includeDefaults || fieldType().storeTermVectors() != defaultFieldType.storeTermVectors()) { builder.field("term_vector", termVectorOptionsToString(fieldType())); } - if (includeDefaults || fieldType().omitNorms() != defaultFieldType.omitNorms() || fieldType().normsLoading() != null) { - builder.startObject("norms"); - if (includeDefaults || fieldType().omitNorms() != defaultFieldType.omitNorms()) { - builder.field("enabled", !fieldType().omitNorms()); - } - if (fieldType().normsLoading() != null) { - builder.field(MappedFieldType.Loading.KEY, fieldType().normsLoading()); - } - builder.endObject(); + if (includeDefaults || fieldType().omitNorms() != defaultFieldType.omitNorms()) { + builder.field("norms", fieldType().omitNorms() == false); } if (indexed && (includeDefaults || fieldType().indexOptions() != defaultFieldType.indexOptions())) { builder.field("index_options", indexOptionToString(fieldType().indexOptions())); } + if (includeDefaults || fieldType().eagerGlobalOrdinals() != defaultFieldType.eagerGlobalOrdinals()) { + builder.field("eager_global_ordinals", fieldType().eagerGlobalOrdinals()); + } if (fieldType().similarity() != null) { builder.field("similarity", fieldType().similarity().name()); @@ -435,9 +420,6 @@ public abstract class FieldMapper extends Mapper implements Cloneable { 
builder.field("similarity", SimilarityService.DEFAULT_SIMILARITY); } - if (includeDefaults || hasCustomFieldDataSettings()) { - builder.field("fielddata", fieldType().fieldDataType().getSettings().getAsMap()); - } multiFields.toXContent(builder, params); if (copyTo != null) { @@ -517,10 +499,6 @@ public abstract class FieldMapper extends Mapper implements Cloneable { return indexed; } - protected boolean hasCustomFieldDataSettings() { - return fieldType().fieldDataType() != null && fieldType().fieldDataType().equals(defaultFieldType.fieldDataType()) == false; - } - protected abstract String contentType(); public static class MultiFields { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java index 5e9378e2f55..5f6fddf09ef 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java @@ -154,8 +154,6 @@ class FieldTypeLookup implements Iterable { for (MappedFieldType fieldType : this) { if (Regex.simpleMatch(pattern, fieldType.name())) { fields.add(fieldType.name()); - } else if (Regex.simpleMatch(pattern, fieldType.name())) { - fields.add(fieldType.name()); } } return fields; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index 10b165ff4c5..9eafa8ccd71 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -37,11 +37,10 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.index.analysis.NamedAnalyzer; -import org.elasticsearch.index.fielddata.FieldDataType; +import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.similarity.SimilarityProvider; @@ -55,46 +54,6 @@ import java.util.Objects; */ public abstract class MappedFieldType extends FieldType { - public enum Loading { - LAZY { - @Override - public String toString() { - return LAZY_VALUE; - } - }, - EAGER { - @Override - public String toString() { - return EAGER_VALUE; - } - }, - EAGER_GLOBAL_ORDINALS { - @Override - public String toString() { - return EAGER_GLOBAL_ORDINALS_VALUE; - } - }; - - public static final String KEY = "loading"; - public static final String EAGER_GLOBAL_ORDINALS_VALUE = "eager_global_ordinals"; - public static final String EAGER_VALUE = "eager"; - public static final String LAZY_VALUE = "lazy"; - - public static Loading parse(String loading, Loading defaultValue) { - if (Strings.isNullOrEmpty(loading)) { - return defaultValue; - } else if (EAGER_GLOBAL_ORDINALS_VALUE.equalsIgnoreCase(loading)) { - return EAGER_GLOBAL_ORDINALS; - } else if (EAGER_VALUE.equalsIgnoreCase(loading)) { - return EAGER; - } else if (LAZY_VALUE.equalsIgnoreCase(loading)) { - return LAZY; - } else { - throw new MapperParsingException("Unknown [" + KEY + "] value: [" + loading + "]"); - } - } - } - private String name; private float boost; // TODO: remove this docvalues flag and use docValuesType @@ 
-103,10 +62,9 @@ public abstract class MappedFieldType extends FieldType { private NamedAnalyzer searchAnalyzer; private NamedAnalyzer searchQuoteAnalyzer; private SimilarityProvider similarity; - private Loading normsLoading; - private FieldDataType fieldDataType; private Object nullValue; private String nullValueAsString; // for sending null value to _all field + private boolean eagerGlobalOrdinals; protected MappedFieldType(MappedFieldType ref) { super(ref); @@ -117,10 +75,9 @@ public abstract class MappedFieldType extends FieldType { this.searchAnalyzer = ref.searchAnalyzer(); this.searchQuoteAnalyzer = ref.searchQuoteAnalyzer(); this.similarity = ref.similarity(); - this.normsLoading = ref.normsLoading(); - this.fieldDataType = ref.fieldDataType(); this.nullValue = ref.nullValue(); this.nullValueAsString = ref.nullValueAsString(); + this.eagerGlobalOrdinals = ref.eagerGlobalOrdinals; } public MappedFieldType() { @@ -130,12 +87,16 @@ public abstract class MappedFieldType extends FieldType { setOmitNorms(false); setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); setBoost(1.0f); - fieldDataType = new FieldDataType(typeName()); } @Override public abstract MappedFieldType clone(); + /** Return a fielddata builder for this field. */ + public IndexFieldData.Builder fielddataBuilder() { + throw new IllegalArgumentException("Fielddata is not supported on fields of type [" + typeName() + "]"); + } + @Override public boolean equals(Object o) { if (!super.equals(o)) return false; @@ -158,8 +119,7 @@ public abstract class MappedFieldType extends FieldType { Objects.equals(indexAnalyzer, fieldType.indexAnalyzer) && Objects.equals(searchAnalyzer, fieldType.searchAnalyzer) && Objects.equals(searchQuoteAnalyzer(), fieldType.searchQuoteAnalyzer()) && - Objects.equals(normsLoading, fieldType.normsLoading) && - Objects.equals(fieldDataType, fieldType.fieldDataType) && + Objects.equals(eagerGlobalOrdinals, fieldType.eagerGlobalOrdinals) && Objects.equals(nullValue, fieldType.nullValue) && Objects.equals(nullValueAsString, fieldType.nullValueAsString); } @@ -167,7 +127,7 @@ public abstract class MappedFieldType extends FieldType { @Override public int hashCode() { return Objects.hash(super.hashCode(), name, boost, docValues, indexAnalyzer, searchAnalyzer, searchQuoteAnalyzer, - similarity == null ? null : similarity.name(), normsLoading, fieldDataType, nullValue, nullValueAsString); + eagerGlobalOrdinals, similarity == null ? null : similarity.name(), nullValue, nullValueAsString); } // norelease: we need to override freeze() and add safety checks that all settings are actually set @@ -205,7 +165,7 @@ public abstract class MappedFieldType extends FieldType { conflicts.add("mapper [" + name() + "] has different [doc_values] values"); } if (omitNorms() && !other.omitNorms()) { - conflicts.add("mapper [" + name() + "] has different [omit_norms] values, cannot change from disable to enabled"); + conflicts.add("mapper [" + name() + "] has different [norms] values, cannot change from disable to enabled"); } if (storeTermVectors() != other.storeTermVectors()) { conflicts.add("mapper [" + name() + "] has different [store_term_vector] values"); @@ -242,21 +202,18 @@ public abstract class MappedFieldType extends FieldType { if (boost() != other.boost()) { conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [boost] across all types."); } - if (normsLoading() != other.normsLoading()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. 
Set update_all_types to true to update [norms.loading] across all types."); - } if (Objects.equals(searchAnalyzer(), other.searchAnalyzer()) == false) { conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [search_analyzer] across all types."); } if (Objects.equals(searchQuoteAnalyzer(), other.searchQuoteAnalyzer()) == false) { conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [search_quote_analyzer] across all types."); } - if (Objects.equals(fieldDataType(), other.fieldDataType()) == false) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [fielddata] across all types."); - } if (Objects.equals(nullValue(), other.nullValue()) == false) { conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [null_value] across all types."); } + if (eagerGlobalOrdinals() != other.eagerGlobalOrdinals()) { + conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [eager_global_ordinals] across all types."); + } } } @@ -286,15 +243,6 @@ public abstract class MappedFieldType extends FieldType { this.boost = boost; } - public FieldDataType fieldDataType() { - return fieldDataType; - } - - public void setFieldDataType(FieldDataType fieldDataType) { - checkIfFrozen(); - this.fieldDataType = fieldDataType; - } - public boolean hasDocValues() { return docValues; } @@ -304,15 +252,6 @@ public abstract class MappedFieldType extends FieldType { this.docValues = hasDocValues; } - public Loading normsLoading() { - return normsLoading; - } - - public void setNormsLoading(Loading normsLoading) { - checkIfFrozen(); - this.normsLoading = normsLoading; - } - public NamedAnalyzer indexAnalyzer() { return indexAnalyzer; } @@ -402,7 +341,7 @@ public abstract class MappedFieldType extends FieldType { public Query termQuery(Object value, @Nullable QueryShardContext context) { TermQuery query = new TermQuery(createTerm(value)); if (boost == 1f || - (context != null && context.indexVersionCreated().before(Version.V_5_0_0))) { + (context != null && context.indexVersionCreated().before(Version.V_5_0_0_alpha1))) { return query; } return new BoostQuery(query, boost); @@ -468,4 +407,21 @@ public abstract class MappedFieldType extends FieldType { public Query queryStringTermQuery(Term term) { return null; } + + protected final void failIfNoDocValues() { + if (hasDocValues() == false) { + throw new IllegalStateException("Can't load fielddata on [" + name() + + "] because fielddata is unsupported on fields of type [" + + typeName() + "]. Use doc values instead."); + } + } + + public boolean eagerGlobalOrdinals() { + return eagerGlobalOrdinals; + } + + public void setEagerGlobalOrdinals(boolean eagerGlobalOrdinals) { + checkIfFrozen(); + this.eagerGlobalOrdinals = eagerGlobalOrdinals; + } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 4dd43db0517..6a9a402a5ff 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -76,6 +76,7 @@ public abstract class Mapper implements ToXContent, Iterable { return this.name; } + /** Returns a newly built mapper. 
*/ public abstract Y build(BuilderContext context); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index b25f5f6a02d..eaf897e7fbd 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -27,17 +27,18 @@ import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.mapper.Mapper.BuilderContext; import org.elasticsearch.index.mapper.object.ObjectMapper; +import org.elasticsearch.index.percolator.PercolatorFieldMapper; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.InvalidTypeNameException; import org.elasticsearch.indices.TypeMissingException; import org.elasticsearch.indices.mapper.MapperRegistry; -import org.elasticsearch.percolator.PercolatorService; import org.elasticsearch.script.ScriptService; import java.io.Closeable; @@ -81,9 +82,11 @@ public class MapperService extends AbstractIndexComponent implements Closeable { } public static final String DEFAULT_MAPPING = "_default_"; - public static final Setting INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING = Setting.longSetting("index.mapping.nested_fields.limit", 50L, 0, true, Setting.Scope.INDEX); + public static final Setting INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING = + Setting.longSetting("index.mapping.nested_fields.limit", 50L, 0, Property.Dynamic, Property.IndexScope); public static final boolean INDEX_MAPPER_DYNAMIC_DEFAULT = true; - public static final Setting INDEX_MAPPER_DYNAMIC_SETTING = Setting.boolSetting("index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT, false, Setting.Scope.INDEX); + public static final Setting INDEX_MAPPER_DYNAMIC_SETTING = + Setting.boolSetting("index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT, Property.IndexScope); private static ObjectHashSet META_FIELDS = ObjectHashSet.from( "_uid", "_id", "_type", "_all", "_parent", "_routing", "_index", "_size", "_timestamp", "_ttl" @@ -330,7 +333,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { } private boolean typeNameStartsWithIllegalDot(DocumentMapper mapper) { - return mapper.type().startsWith(".") && !PercolatorService.TYPE_NAME.equals(mapper.type()); + return mapper.type().startsWith(".") && !PercolatorFieldMapper.TYPE_NAME.equals(mapper.type()); } private boolean assertSerialization(DocumentMapper mapper) { @@ -402,7 +405,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { public DocumentMapper parse(String mappingType, CompressedXContent mappingSource, boolean applyDefault) throws MapperParsingException { String defaultMappingSource; - if (PercolatorService.TYPE_NAME.equals(mappingType)) { + if (PercolatorFieldMapper.TYPE_NAME.equals(mappingType)) { defaultMappingSource = this.defaultPercolatorMappingSource; } else { defaultMappingSource = this.defaultMappingSource; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java 
b/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java index a003fe153c2..da1ae91c9b5 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java @@ -331,13 +331,13 @@ public abstract class ParseContext { } @Override - public void addDynamicMappingsUpdate(Mapper update) { - in.addDynamicMappingsUpdate(update); + public void addDynamicMapper(Mapper update) { + in.addDynamicMapper(update); } @Override - public Mapper dynamicMappingsUpdate() { - return in.dynamicMappingsUpdate(); + public List getDynamicMappers() { + return in.getDynamicMappers(); } } @@ -369,7 +369,7 @@ public abstract class ParseContext { private AllEntries allEntries = new AllEntries(); - private Mapper dynamicMappingsUpdate = null; + private List dynamicMappers = new ArrayList<>(); public InternalParseContext(@Nullable Settings indexSettings, DocumentMapperParser docMapperParser, DocumentMapper docMapper, ContentPath path) { this.indexSettings = indexSettings; @@ -394,7 +394,7 @@ public abstract class ParseContext { this.source = source == null ? null : sourceToParse.source(); this.path.reset(); this.allEntries = new AllEntries(); - this.dynamicMappingsUpdate = null; + this.dynamicMappers = new ArrayList<>(); } @Override @@ -536,18 +536,13 @@ public abstract class ParseContext { } @Override - public void addDynamicMappingsUpdate(Mapper mapper) { - assert mapper instanceof RootObjectMapper : mapper; - if (dynamicMappingsUpdate == null) { - dynamicMappingsUpdate = mapper; - } else { - dynamicMappingsUpdate = dynamicMappingsUpdate.merge(mapper, false); - } + public void addDynamicMapper(Mapper mapper) { + dynamicMappers.add(mapper); } @Override - public Mapper dynamicMappingsUpdate() { - return dynamicMappingsUpdate; + public List getDynamicMappers() { + return dynamicMappers; } } @@ -747,12 +742,12 @@ public abstract class ParseContext { public abstract StringBuilder stringBuilder(); /** - * Add a dynamic update to the root object mapper. + * Add a new mapper dynamically created while parsing. */ - public abstract void addDynamicMappingsUpdate(Mapper update); + public abstract void addDynamicMapper(Mapper update); /** - * Get dynamic updates to the root object mapper. + * Get dynamic mappers created while parsing. 
*/ - public abstract Mapper dynamicMappingsUpdate(); + public abstract List getDynamicMappers(); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java index 9fc12a3dd0c..4eb8b3569d4 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java @@ -32,6 +32,8 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.plain.BytesBinaryDVIndexFieldData; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; @@ -131,6 +133,12 @@ public class BinaryFieldMapper extends FieldMapper { public Object valueForSearch(Object value) { return value(value); } + + @Override + public IndexFieldData.Builder fielddataBuilder() { + failIfNoDocValues(); + return new BytesBinaryDVIndexFieldData.Builder(); + } } protected BinaryFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java index 4b49d644d5f..02d6a536812 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java @@ -29,6 +29,9 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; +import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; @@ -107,8 +110,6 @@ public class BooleanFieldMapper extends FieldMapper { } builder.nullValue(lenientNodeBooleanValue(propNode)); iterator.remove(); - } else if (parseMultiField(builder, name, parserContext, propName, propNode)) { - iterator.remove(); } } return builder; @@ -191,6 +192,12 @@ public class BooleanFieldMapper extends FieldMapper { public boolean useTermQueryWithQueryString() { return true; } + + @Override + public IndexFieldData.Builder fielddataBuilder() { + failIfNoDocValues(); + return new DocValuesIndexFieldData.Builder().numericType(NumericType.BOOLEAN); + } } protected BooleanFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java index 86818a3999e..c42f0b7cd42 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java @@ -38,6 +38,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import 
org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.analysis.NumericIntegerAnalyzer; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; +import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; @@ -185,6 +188,12 @@ public class ByteFieldMapper extends NumberFieldMapper { maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue ); } + + @Override + public IndexFieldData.Builder fielddataBuilder() { + failIfNoDocValues(); + return new DocValuesIndexFieldData.Builder().numericType(NumericType.BYTE); + } } protected ByteFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, @@ -252,7 +261,7 @@ public class ByteFieldMapper extends NumberFieldMapper { context.allEntries().addText(fieldType().name(), fieldType().nullValueAsString(), boost); } } else if (parser.currentToken() == XContentParser.Token.START_OBJECT - && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) { + && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha1)) { XContentParser.Token token; String currentFieldName = null; Byte objValue = fieldType().nullValue(); @@ -285,7 +294,7 @@ public class ByteFieldMapper extends NumberFieldMapper { } if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { CustomByteNumericField field = new CustomByteNumericField(value, fieldType()); - if (boost != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) { + if (boost != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha1)) { field.setBoost(boost); } fields.add(field); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java index 057a8957121..c0b9173cea0 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java @@ -183,7 +183,6 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp private ContextMappings contextMappings = null; public CompletionFieldType() { - setFieldDataType(null); } private CompletionFieldType(CompletionFieldType ref) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java index 048c58297cb..5d66fb62c76 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java @@ -44,7 +44,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.analysis.NumericDateAnalyzer; -import org.elasticsearch.index.fielddata.FieldDataType; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; +import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import 
org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; @@ -252,7 +254,6 @@ public class DateFieldMapper extends NumberFieldMapper { public DateFieldType() { super(LegacyNumericType.LONG); - setFieldDataType(new FieldDataType("long")); } protected DateFieldType(DateFieldType ref) { @@ -417,10 +418,15 @@ public class DateFieldMapper extends NumberFieldMapper { } public long parseToMilliseconds(Object value, boolean inclusive, @Nullable DateTimeZone zone, @Nullable DateMathParser forcedDateParser) { + if (value instanceof Long) { + return ((Long) value).longValue(); + } + DateMathParser dateParser = dateMathParser(); if (forcedDateParser != null) { dateParser = forcedDateParser; } + String strValue; if (value instanceof BytesRef) { strValue = ((BytesRef) value).utf8ToString(); @@ -429,6 +435,12 @@ public class DateFieldMapper extends NumberFieldMapper { } return dateParser.parse(strValue, now(), inclusive, zone); } + + @Override + public IndexFieldData.Builder fielddataBuilder() { + failIfNoDocValues(); + return new DocValuesIndexFieldData.Builder().numericType(NumericType.LONG); + } } protected DateFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Explicit ignoreMalformed,Explicit coerce, @@ -476,7 +488,7 @@ public class DateFieldMapper extends NumberFieldMapper { } else if (token == XContentParser.Token.VALUE_NUMBER) { dateAsString = parser.text(); } else if (token == XContentParser.Token.START_OBJECT - && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) { + && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha1)) { String currentFieldName = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -511,7 +523,7 @@ public class DateFieldMapper extends NumberFieldMapper { if (value != null) { if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { CustomLongNumericField field = new CustomLongNumericField(value, fieldType()); - if (boost != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) { + if (boost != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha1)) { field.setBoost(boost); } fields.add(field); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java index e7550dc1f92..69d56439b33 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java @@ -40,6 +40,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.analysis.NumericDoubleAnalyzer; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; +import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; @@ -188,6 +191,12 @@ public class DoubleFieldMapper extends NumberFieldMapper { maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, 
maxValue ); } + + @Override + public IndexFieldData.Builder fielddataBuilder() { + failIfNoDocValues(); + return new DocValuesIndexFieldData.Builder().numericType(NumericType.DOUBLE); + } } protected DoubleFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Explicit ignoreMalformed, @@ -244,7 +253,7 @@ public class DoubleFieldMapper extends NumberFieldMapper { context.allEntries().addText(fieldType().name(), fieldType().nullValueAsString(), boost); } } else if (parser.currentToken() == XContentParser.Token.START_OBJECT - && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) { + && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha1)) { XContentParser.Token token; String currentFieldName = null; Double objValue = fieldType().nullValue(); @@ -278,7 +287,7 @@ public class DoubleFieldMapper extends NumberFieldMapper { if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { CustomDoubleNumericField field = new CustomDoubleNumericField(value, fieldType()); - if (boost != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) { + if (boost != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha1)) { field.setBoost(boost); } fields.add(field); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java index 93cf3a7cfaf..48b9ca95875 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java @@ -41,6 +41,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.analysis.NumericFloatAnalyzer; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; +import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; @@ -189,6 +192,12 @@ public class FloatFieldMapper extends NumberFieldMapper { maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue ); } + + @Override + public IndexFieldData.Builder fielddataBuilder() { + failIfNoDocValues(); + return new DocValuesIndexFieldData.Builder().numericType(NumericType.FLOAT); + } } protected FloatFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, @@ -256,7 +265,7 @@ public class FloatFieldMapper extends NumberFieldMapper { context.allEntries().addText(fieldType().name(), fieldType().nullValueAsString(), boost); } } else if (parser.currentToken() == XContentParser.Token.START_OBJECT - && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) { + && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha1)) { XContentParser.Token token; String currentFieldName = null; Float objValue = fieldType().nullValue(); @@ -290,7 +299,7 @@ public class FloatFieldMapper extends NumberFieldMapper { if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { CustomFloatNumericField field = new CustomFloatNumericField(value, fieldType()); - if (boost != 1f && 
Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) { + if (boost != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha1)) { field.setBoost(boost); } fields.add(field); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java index fa7191cafbf..860377d0879 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java @@ -40,6 +40,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.analysis.NumericIntegerAnalyzer; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; +import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; @@ -194,6 +197,12 @@ public class IntegerFieldMapper extends NumberFieldMapper { maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue ); } + + @Override + public IndexFieldData.Builder fielddataBuilder() { + failIfNoDocValues(); + return new DocValuesIndexFieldData.Builder().numericType(NumericType.INT); + } } protected IntegerFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, @@ -261,7 +270,7 @@ public class IntegerFieldMapper extends NumberFieldMapper { context.allEntries().addText(fieldType().name(), fieldType().nullValueAsString(), boost); } } else if (parser.currentToken() == XContentParser.Token.START_OBJECT - && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) { + && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha1)) { XContentParser.Token token; String currentFieldName = null; Integer objValue = fieldType().nullValue(); @@ -298,7 +307,7 @@ public class IntegerFieldMapper extends NumberFieldMapper { protected void addIntegerFields(ParseContext context, List fields, int value, float boost) { if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { CustomIntegerNumericField field = new CustomIntegerNumericField(value, fieldType()); - if (boost != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) { + if (boost != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha1)) { field.setBoost(boost); } fields.add(field); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/KeywordFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/KeywordFieldMapper.java index 3f01493590c..ab36b738c83 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/KeywordFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/KeywordFieldMapper.java @@ -29,6 +29,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; +import 
org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; @@ -92,12 +95,9 @@ public final class KeywordFieldMapper extends FieldMapper implements AllFieldMap return super.indexOptions(indexOptions); } - @Override - protected void setupFieldType(BuilderContext context) { - if (!omitNormsSet && fieldType.boost() != 1.0f) { - fieldType.setOmitNorms(false); - } - super.setupFieldType(context); + public Builder eagerGlobalOrdinals(boolean eagerGlobalOrdinals) { + fieldType().setEagerGlobalOrdinals(eagerGlobalOrdinals); + return builder; } @Override @@ -128,7 +128,11 @@ public final class KeywordFieldMapper extends FieldMapper implements AllFieldMap } else if (propName.equals("ignore_above")) { builder.ignoreAbove(XContentMapValues.nodeIntegerValue(propNode, -1)); iterator.remove(); - } else if (parseMultiField(builder, name, parserContext, propName, propNode)) { + } else if (propName.equals("norms")) { + builder.omitNorms(XContentMapValues.nodeBooleanValue(propNode) == false); + iterator.remove(); + } else if (propName.equals("eager_global_ordinals")) { + builder.eagerGlobalOrdinals(XContentMapValues.nodeBooleanValue(propNode)); iterator.remove(); } } @@ -168,6 +172,12 @@ public final class KeywordFieldMapper extends FieldMapper implements AllFieldMap } return termQuery(nullValue(), null); } + + @Override + public IndexFieldData.Builder fielddataBuilder() { + failIfNoDocValues(); + return new DocValuesIndexFieldData.Builder(); + } } private Boolean includeInAll; @@ -180,6 +190,13 @@ public final class KeywordFieldMapper extends FieldMapper implements AllFieldMap this.ignoreAbove = ignoreAbove; } + /** Values that have more chars than the return value of this method will + * be skipped at parsing time. 
*/ + // pkg-private for testing + int ignoreAbove() { + return ignoreAbove; + } + @Override protected KeywordFieldMapper clone() { return (KeywordFieldMapper) super.clone(); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java index a1acf0ab58a..b261df7a221 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java @@ -40,6 +40,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.analysis.NumericLongAnalyzer; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; +import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; @@ -192,6 +195,12 @@ public class LongFieldMapper extends NumberFieldMapper { maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue ); } + + @Override + public IndexFieldData.Builder fielddataBuilder() { + failIfNoDocValues(); + return new DocValuesIndexFieldData.Builder().numericType(NumericType.LONG); + } } protected LongFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, @@ -249,7 +258,7 @@ public class LongFieldMapper extends NumberFieldMapper { context.allEntries().addText(fieldType().name(), fieldType().nullValueAsString(), boost); } } else if (parser.currentToken() == XContentParser.Token.START_OBJECT - && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) { + && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha1)) { XContentParser.Token token; String currentFieldName = null; Long objValue = fieldType().nullValue(); @@ -282,7 +291,7 @@ public class LongFieldMapper extends NumberFieldMapper { } if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { CustomLongNumericField field = new CustomLongNumericField(value, fieldType()); - if (boost != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) { + if (boost != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha1)) { field.setBoost(boost); } fields.add(field); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java index 7c2a38eaee7..b73dfc59930 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java @@ -31,12 +31,17 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableFieldType; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; import 
org.elasticsearch.index.analysis.NamedAnalyzer; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexNumericFieldData; +import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; @@ -52,7 +57,9 @@ import java.util.List; * */ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldMapper.IncludeInAll { - private static final Setting COERCE_SETTING = Setting.boolSetting("index.mapping.coerce", true, false, Setting.Scope.INDEX); // this is private since it has a different default + // this is private since it has a different default + private static final Setting COERCE_SETTING = + Setting.boolSetting("index.mapping.coerce", true, Property.IndexScope); public static class Defaults { @@ -113,7 +120,6 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM protected void setupFieldType(BuilderContext context) { super.setupFieldType(context); - fieldType.setOmitNorms(fieldType.omitNorms() && fieldType.boost() == 1.0f); int precisionStep = fieldType.numericPrecisionStep(); if (precisionStep <= 0 || precisionStep >= maxPrecisionStep()) { fieldType.setNumericPrecisionStep(Integer.MAX_VALUE); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java index 56b1e9a78f2..eb00e0744c8 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java @@ -40,6 +40,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.analysis.NumericIntegerAnalyzer; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; +import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; @@ -190,6 +193,12 @@ public class ShortFieldMapper extends NumberFieldMapper { maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue ); } + + @Override + public IndexFieldData.Builder fielddataBuilder() { + failIfNoDocValues(); + return new DocValuesIndexFieldData.Builder().numericType(NumericType.SHORT); + } } protected ShortFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, @@ -257,7 +266,7 @@ public class ShortFieldMapper extends NumberFieldMapper { context.allEntries().addText(fieldType().name(), fieldType().nullValueAsString(), boost); } } else if (parser.currentToken() == XContentParser.Token.START_OBJECT - && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) { + && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha1)) { XContentParser.Token token; String currentFieldName = null; Short objValue = fieldType().nullValue(); @@ -290,7 +299,7 @@ public class ShortFieldMapper extends NumberFieldMapper { } if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { CustomShortNumericField field = new 
CustomShortNumericField(value, fieldType()); - if (boost != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) { + if (boost != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha1)) { field.setBoost(boost); } fields.add(field); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java index c4659a6571e..c87efcd9218 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java @@ -26,11 +26,18 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.analysis.NamedAnalyzer; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; +import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData; +import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; @@ -39,9 +46,13 @@ import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.internal.AllFieldMapper; import java.io.IOException; +import java.util.Arrays; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.Set; import static org.apache.lucene.index.IndexOptions.NONE; import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField; @@ -52,7 +63,20 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc public static final String CONTENT_TYPE = "string"; private static final int POSITION_INCREMENT_GAP_USE_ANALYZER = -1; + // If a string field is created on 5.x and all parameters are in this list then we + // will automatically upgrade to a text/keyword field. Otherwise we will just fail + // saying that string fields are not supported anymore. 
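(Aside, as a reading aid for the upgrade path: the comment above describes the contract that a [string] mapping is rewritten to [text] or [keyword] only when every parameter it carries appears in the SUPPORTED_PARAMETERS_FOR_AUTO_UPGRADE set declared just below; anything else fails hard. A rough, self-contained sketch of that decision follows, using plain collections instead of the real Mapper.TypeParser plumbing; the class and method names here are illustrative only.)

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    class StringUpgradeSketch {
        private static final Set<String> SUPPORTED = new HashSet<>(Arrays.asList(
                "type", "index", "store", "doc_values", "omit_norms", "norms",
                "fields", "copy_to", "fielddata", "ignore_above"));

        /** Decide whether a legacy string mapping can be upgraded, and to which type. */
        static String upgradeTarget(Map<String, Object> node) {
            if (SUPPORTED.containsAll(node.keySet()) == false) {
                throw new IllegalArgumentException("The [string] type is removed in 5.0");
            }
            Object index = node.remove("index");
            // "analyzed" (or an absent index setting) keeps the field analyzed -> text;
            // "not_analyzed" and "no" select the un-analyzed path -> keyword
            boolean keyword = index != null && "analyzed".equals(index) == false;
            // the old tri-state collapses to the new boolean index setting
            node.put("index", "no".equals(index) == false);
            return keyword ? "keyword" : "text";
        }
    }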
+ private static final Set SUPPORTED_PARAMETERS_FOR_AUTO_UPGRADE = new HashSet<>(Arrays.asList( + "type", + // most common parameters, for which the upgrade is straightforward + "index", "store", "doc_values", "omit_norms", "norms", "fields", "copy_to", + "fielddata", "ignore_above")); + public static class Defaults { + public static double FIELDDATA_MIN_FREQUENCY = 0; + public static double FIELDDATA_MAX_FREQUENCY = Integer.MAX_VALUE; + public static int FIELDDATA_MIN_SEGMENT_SIZE = 0; + public static final MappedFieldType FIELD_TYPE = new StringFieldType(); static { @@ -83,6 +107,11 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc builder = this; } + @Override + public StringFieldType fieldType() { + return (StringFieldType) super.fieldType(); + } + @Override public Builder searchAnalyzer(NamedAnalyzer searchAnalyzer) { super.searchAnalyzer(searchAnalyzer); @@ -99,6 +128,31 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc return this; } + public Builder fielddata(boolean fielddata) { + fieldType().setFielddata(fielddata); + return builder; + } + + public Builder eagerGlobalOrdinals(boolean eagerGlobalOrdinals) { + fieldType().setEagerGlobalOrdinals(eagerGlobalOrdinals); + return builder; + } + + public Builder fielddataFrequencyFilter(double minFreq, double maxFreq, int minSegmentSize) { + fieldType().setFielddataMinFrequency(minFreq); + fieldType().setFielddataMaxFrequency(maxFreq); + fieldType().setFielddataMinSegmentSize(minSegmentSize); + return builder; + } + + @Override + protected void setupFieldType(BuilderContext context) { + super.setupFieldType(context); + if (fieldType().hasDocValues() && ((StringFieldType) fieldType()).fielddata()) { + ((StringFieldType) fieldType()).setFielddata(false); + } + } + @Override public StringFieldMapper build(BuilderContext context) { if (positionIncrementGap != POSITION_INCREMENT_GAP_USE_ANALYZER) { @@ -123,20 +177,80 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc } setupFieldType(context); StringFieldMapper fieldMapper = new StringFieldMapper( - name, fieldType, defaultFieldType, positionIncrementGap, ignoreAbove, + name, fieldType(), defaultFieldType, positionIncrementGap, ignoreAbove, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); return fieldMapper.includeInAll(includeInAll); } } public static class TypeParser implements Mapper.TypeParser { + private final DeprecationLogger deprecationLogger; + + public TypeParser() { + ESLogger logger = Loggers.getLogger(getClass()); + this.deprecationLogger = new DeprecationLogger(logger); + } + @Override public Mapper.Builder parse(String fieldName, Map node, ParserContext parserContext) throws MapperParsingException { - // TODO: temporarily disabled to give Kibana time to upgrade to text/keyword mappings - /*if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0)) { + if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0_alpha1)) { + // Automatically upgrade simple mappings for ease of upgrade, otherwise fail + if (SUPPORTED_PARAMETERS_FOR_AUTO_UPGRADE.containsAll(node.keySet())) { + deprecationLogger.deprecated("The [string] field is deprecated, please use [text] or [keyword] instead on [{}]", + fieldName); + final Object index = node.remove("index"); + final boolean keyword = index != null && "analyzed".equals(index) == false; + { + // upgrade the index setting + node.put("index", "no".equals(index) == false); + } + { + // upgrade norms settings + 
Object norms = node.remove("norms"); + if (norms instanceof Map) { + norms = ((Map) norms).get("enabled"); + } + if (norms != null) { + node.put("norms", TypeParsers.nodeBooleanValue("norms", norms, parserContext)); + } + Object omitNorms = node.remove("omit_norms"); + if (omitNorms != null) { + node.put("norms", TypeParsers.nodeBooleanValue("omit_norms", omitNorms, parserContext) == false); + } + } + { + // upgrade fielddata settings + Object fielddataO = node.get("fielddata"); + if (fielddataO instanceof Map) { + Map fielddata = (Map) fielddataO; + if (keyword == false) { + node.put("fielddata", "disabled".equals(fielddata.get("format")) == false); + Map fielddataFilter = (Map) fielddata.get("filter"); + if (fielddataFilter != null) { + Map frequencyFilter = (Map) fielddataFilter.get("frequency"); + frequencyFilter.keySet().retainAll(Arrays.asList("min", "max", "min_segment_size")); + node.put("fielddata_frequency_filter", frequencyFilter); + } + } else { + node.remove("fielddata"); + } + final Object loading = fielddata.get("loading"); + if (loading != null) { + node.put("eager_global_ordinals", "eager_global_ordinals".equals(loading)); + } + } + } + if (keyword) { + return new KeywordFieldMapper.TypeParser().parse(fieldName, node, parserContext); + } else { + return new TextFieldMapper.TypeParser().parse(fieldName, node, parserContext); + } + + } throw new IllegalArgumentException("The [string] type is removed in 5.0. You should now use either a [text] " + "or [keyword] field instead for field [" + fieldName + "]"); - }*/ + } + StringFieldMapper.Builder builder = new StringFieldMapper.Builder(fieldName); // hack for the fact that string can't just accept true/false for // the index property and still accepts no/not_analyzed/analyzed @@ -159,6 +273,21 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc throw new IllegalArgumentException("Can't parse [index] value [" + index + "] for field [" + fieldName + "], expected [true], [false], [no], [not_analyzed] or [analyzed]"); } } + final Object fielddataObject = node.get("fielddata"); + if (fielddataObject instanceof Map) { + Map fielddata = (Map) fielddataObject; + final Object loading = fielddata.get("loading"); + if (loading != null) { + node.put("eager_global_ordinals", "eager_global_ordinals".equals(loading)); + } + Map fielddataFilter = (Map) fielddata.get("filter"); + if (fielddataFilter != null) { + Map frequencyFilter = (Map) fielddataFilter.get("frequency"); + frequencyFilter.keySet().retainAll(Arrays.asList("min", "max", "min_segment_size")); + node.put("fielddata_frequency_filter", frequencyFilter); + } + node.put("fielddata", "disabled".equals(fielddata.get("format")) == false); + } parseTextField(builder, fieldName, node, parserContext); for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { Map.Entry entry = iterator.next(); @@ -191,7 +320,19 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc } else if (propName.equals("ignore_above")) { builder.ignoreAbove(XContentMapValues.nodeIntegerValue(propNode, -1)); iterator.remove(); - } else if (parseMultiField(builder, fieldName, parserContext, propName, propNode)) { + } else if (propName.equals("fielddata")) { + builder.fielddata(XContentMapValues.nodeBooleanValue(propNode)); + iterator.remove(); + } else if (propName.equals("eager_global_ordinals")) { + builder.eagerGlobalOrdinals(XContentMapValues.nodeBooleanValue(propNode)); + iterator.remove(); + } else if 
(propName.equals("fielddata_frequency_filter")) { + Map frequencyFilter = (Map) propNode; + double minFrequency = XContentMapValues.nodeDoubleValue(frequencyFilter.remove("min"), 0); + double maxFrequency = XContentMapValues.nodeDoubleValue(frequencyFilter.remove("max"), Integer.MAX_VALUE); + int minSegmentSize = XContentMapValues.nodeIntegerValue(frequencyFilter.remove("min_segment_size"), 0); + builder.fielddataFrequencyFilter(minFrequency, maxFrequency, minSegmentSize); + DocumentMapperParser.checkNoRemainingFields(propName, frequencyFilter, parserContext.indexVersionCreated()); iterator.remove(); } } @@ -201,10 +342,42 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc public static final class StringFieldType extends MappedFieldType { - public StringFieldType() {} + private boolean fielddata; + private double fielddataMinFrequency; + private double fielddataMaxFrequency; + private int fielddataMinSegmentSize; + + public StringFieldType() { + fielddata = true; + fielddataMinFrequency = Defaults.FIELDDATA_MIN_FREQUENCY; + fielddataMaxFrequency = Defaults.FIELDDATA_MAX_FREQUENCY; + fielddataMinSegmentSize = Defaults.FIELDDATA_MIN_SEGMENT_SIZE; + } protected StringFieldType(StringFieldType ref) { super(ref); + this.fielddata = ref.fielddata; + this.fielddataMinFrequency = ref.fielddataMinFrequency; + this.fielddataMaxFrequency = ref.fielddataMaxFrequency; + this.fielddataMinSegmentSize = ref.fielddataMinSegmentSize; + } + + @Override + public boolean equals(Object o) { + if (super.equals(o) == false) { + return false; + } + StringFieldType that = (StringFieldType) o; + return fielddata == that.fielddata + && fielddataMinFrequency == that.fielddataMinFrequency + && fielddataMaxFrequency == that.fielddataMaxFrequency + && fielddataMinSegmentSize == that.fielddataMinSegmentSize; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), fielddata, + fielddataMinFrequency, fielddataMaxFrequency, fielddataMinSegmentSize); } public StringFieldType clone() { @@ -216,6 +389,67 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc return CONTENT_TYPE; } + @Override + public void checkCompatibility(MappedFieldType other, + List conflicts, boolean strict) { + super.checkCompatibility(other, conflicts, strict); + StringFieldType otherType = (StringFieldType) other; + if (strict) { + if (fielddata() != otherType.fielddata()) { + conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [fielddata] " + + "across all types."); + } + if (fielddataMinFrequency() != otherType.fielddataMinFrequency()) { + conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update " + + "[fielddata_frequency_filter.min] across all types."); + } + if (fielddataMaxFrequency() != otherType.fielddataMaxFrequency()) { + conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update " + + "[fielddata_frequency_filter.max] across all types."); + } + if (fielddataMinSegmentSize() != otherType.fielddataMinSegmentSize()) { + conflicts.add("mapper [" + name() + "] is used by multiple types. 
Set update_all_types to true to update " + + "[fielddata_frequency_filter.min_segment_size] across all types."); + } + } + } + + public boolean fielddata() { + return fielddata; + } + + public void setFielddata(boolean fielddata) { + checkIfFrozen(); + this.fielddata = fielddata; + } + + public double fielddataMinFrequency() { + return fielddataMinFrequency; + } + + public void setFielddataMinFrequency(double fielddataMinFrequency) { + checkIfFrozen(); + this.fielddataMinFrequency = fielddataMinFrequency; + } + + public double fielddataMaxFrequency() { + return fielddataMaxFrequency; + } + + public void setFielddataMaxFrequency(double fielddataMaxFrequency) { + checkIfFrozen(); + this.fielddataMaxFrequency = fielddataMaxFrequency; + } + + public int fielddataMinSegmentSize() { + return fielddataMinSegmentSize; + } + + public void setFielddataMinSegmentSize(int fielddataMinSegmentSize) { + checkIfFrozen(); + this.fielddataMinSegmentSize = fielddataMinSegmentSize; + } + @Override public String value(Object value) { if (value == null) { @@ -231,24 +465,42 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc } return termQuery(nullValue(), null); } + + @Override + public IndexFieldData.Builder fielddataBuilder() { + if (hasDocValues()) { + return new DocValuesIndexFieldData.Builder(); + } else if (fielddata) { + return new PagedBytesIndexFieldData.Builder(fielddataMinFrequency, fielddataMaxFrequency, fielddataMinSegmentSize); + } else { + throw new IllegalStateException("Fielddata is disabled on analyzed string fields by default. Set fielddata=true on [" + + name() + "] in order to load fielddata in memory by uninverting the inverted index. Note that this can however " + + "use significant memory."); + } + } } private Boolean includeInAll; private int positionIncrementGap; private int ignoreAbove; - protected StringFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, + protected StringFieldMapper(String simpleName, StringFieldType fieldType, MappedFieldType defaultFieldType, int positionIncrementGap, int ignoreAbove, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); - // TODO: temporarily disabled to give Kibana time to upgrade to text/keyword mappings - /*if (Version.indexCreated(indexSettings).onOrAfter(Version.V_5_0_0)) { + if (Version.indexCreated(indexSettings).onOrAfter(Version.V_5_0_0_alpha1)) { throw new IllegalArgumentException("The [string] type is removed in 5.0. 
You should now use either a [text] " + "or [keyword] field instead for field [" + fieldType.name() + "]"); - }*/ + } if (fieldType.tokenized() && fieldType.indexOptions() != NONE && fieldType().hasDocValues()) { throw new MapperParsingException("Field [" + fieldType.name() + "] cannot be analyzed and have doc values"); } + if (fieldType.hasDocValues() && ( + fieldType.fielddataMinFrequency() != Defaults.FIELDDATA_MIN_FREQUENCY + || fieldType.fielddataMaxFrequency() != Defaults.FIELDDATA_MAX_FREQUENCY + || fieldType.fielddataMinSegmentSize() != Defaults.FIELDDATA_MIN_SEGMENT_SIZE)) { + throw new MapperParsingException("Field [" + fieldType.name() + "] cannot have doc values and use fielddata filtering"); + } this.positionIncrementGap = positionIncrementGap; this.ignoreAbove = ignoreAbove; } @@ -319,7 +571,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { Field field = new Field(fieldType().name(), valueAndBoost.value(), fieldType()); - if (valueAndBoost.boost() != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) { + if (valueAndBoost.boost() != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha1)) { field.setBoost(valueAndBoost.boost()); } fields.add(field); @@ -346,7 +598,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc return new ValueAndBoost(nullValue, defaultBoost); } if (parser.currentToken() == XContentParser.Token.START_OBJECT - && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) { + && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha1)) { XContentParser.Token token; String currentFieldName = null; String value = nullValue; @@ -392,6 +644,11 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc } } + @Override + public StringFieldType fieldType() { + return (StringFieldType) super.fieldType(); + } + @Override protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { super.doXContentBody(builder, includeDefaults, params); @@ -413,6 +670,27 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc if (includeDefaults || ignoreAbove != Defaults.IGNORE_ABOVE) { builder.field("ignore_above", ignoreAbove); } + if (includeDefaults || fieldType().fielddata() != ((StringFieldType) defaultFieldType).fielddata()) { + builder.field("fielddata", fieldType().fielddata()); + } + if (fieldType().fielddata()) { + if (includeDefaults + || fieldType().fielddataMinFrequency() != Defaults.FIELDDATA_MIN_FREQUENCY + || fieldType().fielddataMaxFrequency() != Defaults.FIELDDATA_MAX_FREQUENCY + || fieldType().fielddataMinSegmentSize() != Defaults.FIELDDATA_MIN_SEGMENT_SIZE) { + builder.startObject("fielddata_frequency_filter"); + if (includeDefaults || fieldType().fielddataMinFrequency() != Defaults.FIELDDATA_MIN_FREQUENCY) { + builder.field("min", fieldType().fielddataMinFrequency()); + } + if (includeDefaults || fieldType().fielddataMaxFrequency() != Defaults.FIELDDATA_MAX_FREQUENCY) { + builder.field("max", fieldType().fielddataMaxFrequency()); + } + if (includeDefaults || fieldType().fielddataMinSegmentSize() != Defaults.FIELDDATA_MIN_SEGMENT_SIZE) { + builder.field("min_segment_size", fieldType().fielddataMinSegmentSize()); + } + builder.endObject(); + } + } } /** diff --git 
a/core/src/main/java/org/elasticsearch/index/mapper/core/TextFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/TextFieldMapper.java index 1ec093ec8e0..0baa4564b69 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/TextFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/TextFieldMapper.java @@ -27,6 +27,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.analysis.NamedAnalyzer; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData; +import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; @@ -38,6 +41,7 @@ import java.io.IOException; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField; import static org.elasticsearch.index.mapper.core.TypeParsers.parseTextField; @@ -49,6 +53,10 @@ public class TextFieldMapper extends FieldMapper implements AllFieldMapper.Inclu private static final int POSITION_INCREMENT_GAP_USE_ANALYZER = -1; public static class Defaults { + public static double FIELDDATA_MIN_FREQUENCY = 0; + public static double FIELDDATA_MAX_FREQUENCY = Integer.MAX_VALUE; + public static int FIELDDATA_MIN_SEGMENT_SIZE = 0; + public static final MappedFieldType FIELD_TYPE = new TextFieldType(); static { @@ -72,6 +80,11 @@ public class TextFieldMapper extends FieldMapper implements AllFieldMapper.Inclu builder = this; } + @Override + public TextFieldType fieldType() { + return (TextFieldType) super.fieldType(); + } + public Builder positionIncrementGap(int positionIncrementGap) { if (positionIncrementGap < 0) { throw new MapperParsingException("[positions_increment_gap] must be positive, got " + positionIncrementGap); @@ -80,6 +93,11 @@ public class TextFieldMapper extends FieldMapper implements AllFieldMapper.Inclu return this; } + public Builder fielddata(boolean fielddata) { + fieldType().setFielddata(fielddata); + return builder; + } + @Override public Builder docValues(boolean docValues) { if (docValues) { @@ -88,6 +106,18 @@ public class TextFieldMapper extends FieldMapper implements AllFieldMapper.Inclu return super.docValues(docValues); } + public Builder eagerGlobalOrdinals(boolean eagerGlobalOrdinals) { + fieldType().setEagerGlobalOrdinals(eagerGlobalOrdinals); + return builder; + } + + public Builder fielddataFrequencyFilter(double minFreq, double maxFreq, int minSegmentSize) { + fieldType().setFielddataMinFrequency(minFreq); + fieldType().setFielddataMaxFrequency(maxFreq); + fieldType().setFielddataMinSegmentSize(minSegmentSize); + return builder; + } + @Override public TextFieldMapper build(BuilderContext context) { if (positionIncrementGap != POSITION_INCREMENT_GAP_USE_ANALYZER) { @@ -119,7 +149,19 @@ public class TextFieldMapper extends FieldMapper implements AllFieldMapper.Inclu int newPositionIncrementGap = XContentMapValues.nodeIntegerValue(propNode, -1); builder.positionIncrementGap(newPositionIncrementGap); iterator.remove(); - } else if (parseMultiField(builder, fieldName, parserContext, propName, propNode)) { + } else if (propName.equals("fielddata")) { + 
builder.fielddata(XContentMapValues.nodeBooleanValue(propNode)); + iterator.remove(); + } else if (propName.equals("eager_global_ordinals")) { + builder.eagerGlobalOrdinals(XContentMapValues.nodeBooleanValue(propNode)); + iterator.remove(); + } else if (propName.equals("fielddata_frequency_filter")) { + Map frequencyFilter = (Map) propNode; + double minFrequency = XContentMapValues.nodeDoubleValue(frequencyFilter.remove("min"), 0); + double maxFrequency = XContentMapValues.nodeDoubleValue(frequencyFilter.remove("max"), Integer.MAX_VALUE); + int minSegmentSize = XContentMapValues.nodeIntegerValue(frequencyFilter.remove("min_segment_size"), 0); + builder.fielddataFrequencyFilter(minFrequency, maxFrequency, minSegmentSize); + DocumentMapperParser.checkNoRemainingFields(propName, frequencyFilter, parserContext.indexVersionCreated()); iterator.remove(); } } @@ -129,16 +171,110 @@ public class TextFieldMapper extends FieldMapper implements AllFieldMapper.Inclu public static final class TextFieldType extends MappedFieldType { - public TextFieldType() {} + private boolean fielddata; + private double fielddataMinFrequency; + private double fielddataMaxFrequency; + private int fielddataMinSegmentSize; + + public TextFieldType() { + // TODO: change the default to false + fielddata = true; + fielddataMinFrequency = Defaults.FIELDDATA_MIN_FREQUENCY; + fielddataMaxFrequency = Defaults.FIELDDATA_MAX_FREQUENCY; + fielddataMinSegmentSize = Defaults.FIELDDATA_MIN_SEGMENT_SIZE; + } protected TextFieldType(TextFieldType ref) { super(ref); + this.fielddata = ref.fielddata; + this.fielddataMinFrequency = ref.fielddataMinFrequency; + this.fielddataMaxFrequency = ref.fielddataMaxFrequency; + this.fielddataMinSegmentSize = ref.fielddataMinSegmentSize; } public TextFieldType clone() { return new TextFieldType(this); } + @Override + public boolean equals(Object o) { + if (super.equals(o) == false) { + return false; + } + TextFieldType that = (TextFieldType) o; + return fielddata == that.fielddata + && fielddataMinFrequency == that.fielddataMinFrequency + && fielddataMaxFrequency == that.fielddataMaxFrequency + && fielddataMinSegmentSize == that.fielddataMinSegmentSize; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), fielddata, + fielddataMinFrequency, fielddataMaxFrequency, fielddataMinSegmentSize); + } + + @Override + public void checkCompatibility(MappedFieldType other, + List conflicts, boolean strict) { + super.checkCompatibility(other, conflicts, strict); + TextFieldType otherType = (TextFieldType) other; + if (strict) { + if (fielddata() != otherType.fielddata()) { + conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [fielddata] " + + "across all types."); + } + if (fielddataMinFrequency() != otherType.fielddataMinFrequency()) { + conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update " + + "[fielddata_frequency_filter.min] across all types."); + } + if (fielddataMaxFrequency() != otherType.fielddataMaxFrequency()) { + conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update " + + "[fielddata_frequency_filter.max] across all types."); + } + if (fielddataMinSegmentSize() != otherType.fielddataMinSegmentSize()) { + conflicts.add("mapper [" + name() + "] is used by multiple types. 
Set update_all_types to true to update " + + "[fielddata_frequency_filter.min_segment_size] across all types."); + } + } + } + + public boolean fielddata() { + return fielddata; + } + + public void setFielddata(boolean fielddata) { + checkIfFrozen(); + this.fielddata = fielddata; + } + + public double fielddataMinFrequency() { + return fielddataMinFrequency; + } + + public void setFielddataMinFrequency(double fielddataMinFrequency) { + checkIfFrozen(); + this.fielddataMinFrequency = fielddataMinFrequency; + } + + public double fielddataMaxFrequency() { + return fielddataMaxFrequency; + } + + public void setFielddataMaxFrequency(double fielddataMaxFrequency) { + checkIfFrozen(); + this.fielddataMaxFrequency = fielddataMaxFrequency; + } + + public int fielddataMinSegmentSize() { + return fielddataMinSegmentSize; + } + + public void setFielddataMinSegmentSize(int fielddataMinSegmentSize) { + checkIfFrozen(); + this.fielddataMinSegmentSize = fielddataMinSegmentSize; + } + @Override public String typeName() { return CONTENT_TYPE; @@ -159,6 +295,16 @@ public class TextFieldMapper extends FieldMapper implements AllFieldMapper.Inclu } return termQuery(nullValue(), null); } + + @Override + public IndexFieldData.Builder fielddataBuilder() { + if (fielddata == false) { + throw new IllegalStateException("Fielddata is disabled on text fields by default. Set fielddata=true on [" + name() + + "] in order to load fielddata in memory by uninverting the inverted index. Note that this can however " + + "use significant memory."); + } + return new PagedBytesIndexFieldData.Builder(fielddataMinFrequency, fielddataMaxFrequency, fielddataMinSegmentSize); + } } private Boolean includeInAll; @@ -249,6 +395,11 @@ public class TextFieldMapper extends FieldMapper implements AllFieldMapper.Inclu this.includeInAll = ((TextFieldMapper) mergeWith).includeInAll; } + @Override + public TextFieldType fieldType() { + return (TextFieldType) super.fieldType(); + } + @Override protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { super.doXContentBody(builder, includeDefaults, params); @@ -263,5 +414,27 @@ public class TextFieldMapper extends FieldMapper implements AllFieldMapper.Inclu if (includeDefaults || positionIncrementGap != POSITION_INCREMENT_GAP_USE_ANALYZER) { builder.field("position_increment_gap", positionIncrementGap); } + + if (includeDefaults || fieldType().fielddata() != ((TextFieldType) defaultFieldType).fielddata()) { + builder.field("fielddata", fieldType().fielddata()); + } + if (fieldType().fielddata()) { + if (includeDefaults + || fieldType().fielddataMinFrequency() != Defaults.FIELDDATA_MIN_FREQUENCY + || fieldType().fielddataMaxFrequency() != Defaults.FIELDDATA_MAX_FREQUENCY + || fieldType().fielddataMinSegmentSize() != Defaults.FIELDDATA_MIN_SEGMENT_SIZE) { + builder.startObject("fielddata_frequency_filter"); + if (includeDefaults || fieldType().fielddataMinFrequency() != Defaults.FIELDDATA_MIN_FREQUENCY) { + builder.field("min", fieldType().fielddataMinFrequency()); + } + if (includeDefaults || fieldType().fielddataMaxFrequency() != Defaults.FIELDDATA_MAX_FREQUENCY) { + builder.field("max", fieldType().fielddataMaxFrequency()); + } + if (includeDefaults || fieldType().fielddataMinSegmentSize() != Defaults.FIELDDATA_MIN_SEGMENT_SIZE) { + builder.field("min_segment_size", fieldType().fielddataMinSegmentSize()); + } + builder.endObject(); + } + } } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java 
b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java index 15fcd9220e2..bc7c97bc4ff 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java @@ -25,25 +25,27 @@ import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.joda.Joda; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.loader.SettingsLoader; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.FieldMapper; -import org.elasticsearch.index.mapper.MappedFieldType.Loading; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.similarity.SimilarityProvider; import org.elasticsearch.index.similarity.SimilarityService; +import java.util.Arrays; import java.util.Collections; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Set; import static org.elasticsearch.common.xcontent.support.XContentMapValues.isArray; import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue; @@ -63,10 +65,18 @@ public class TypeParsers { public static final String INDEX_OPTIONS_POSITIONS = "positions"; public static final String INDEX_OPTIONS_OFFSETS = "offsets"; - private static boolean nodeBooleanValue(Object node, Mapper.TypeParser.ParserContext parserContext) { - if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0)) { + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(TypeParsers.class)); + private static final Set BOOLEAN_STRINGS = new HashSet<>(Arrays.asList("true", "false")); + + public static boolean nodeBooleanValue(String name, Object node, Mapper.TypeParser.ParserContext parserContext) { + // Hook onto ParseFieldMatcher so that parsing becomes strict when setting index.query.parse.strict + if (parserContext.parseFieldMatcher().isStrict()) { return XContentMapValues.nodeBooleanValue(node); } else { + // TODO: remove this leniency in 6.0 + if (BOOLEAN_STRINGS.contains(node.toString()) == false) { + DEPRECATION_LOGGER.deprecated("Expected a boolean for property [{}] but got [{}]", name, node); + } return XContentMapValues.lenientNodeBooleanValue(node); } } @@ -81,13 +91,10 @@ public class TypeParsers { builder.precisionStep(nodeIntegerValue(propNode)); iterator.remove(); } else if (propName.equals("ignore_malformed")) { - builder.ignoreMalformed(nodeBooleanValue(propNode, parserContext)); + builder.ignoreMalformed(nodeBooleanValue("ignore_malformed", propNode, parserContext)); iterator.remove(); } else if (propName.equals("coerce")) { - builder.coerce(nodeBooleanValue(propNode, parserContext)); - iterator.remove(); - } else if (propName.equals("omit_norms")) { - builder.omitNorms(nodeBooleanValue(propNode, parserContext)); + builder.coerce(nodeBooleanValue("coerce", propNode, parserContext)); iterator.remove(); 
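(The nodeBooleanValue overload used above is the leniency pattern this change applies throughout: when the ParseFieldMatcher is strict, only true/false are accepted, otherwise legacy values are still coerced but a deprecation warning is emitted. A minimal sketch of the pattern, with System.err standing in for the real DeprecationLogger; the lenient truth table below is an assumption for illustration, not a copy of lenientNodeBooleanValue.)

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    class LenientBooleanSketch {
        private static final Set<String> BOOLEAN_STRINGS = new HashSet<>(Arrays.asList("true", "false"));

        static boolean nodeBooleanValue(String name, Object node, boolean strict) {
            String text = String.valueOf(node);
            if (BOOLEAN_STRINGS.contains(text)) {
                return Boolean.parseBoolean(text); // both modes accept plain booleans
            }
            if (strict) {
                throw new IllegalArgumentException("Expected a boolean for [" + name + "] but got [" + text + "]");
            }
            // lenient path: warn, then coerce the way the old parsers did (assumed table)
            System.err.println("deprecated: expected a boolean for property [" + name + "] but got [" + text + "]");
            return (text.equals("off") || text.equals("no") || text.equals("0") || text.isEmpty()) == false;
        }
    }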
} else if (propName.equals("similarity")) { SimilarityProvider similarityProvider = resolveSimilarity(parserContext, name, propNode.toString()); @@ -112,16 +119,16 @@ public class TypeParsers { parseTermVector(name, propNode.toString(), builder); iterator.remove(); } else if (propName.equals("store_term_vectors")) { - builder.storeTermVectors(nodeBooleanValue(propNode, parserContext)); + builder.storeTermVectors(nodeBooleanValue("store_term_vectors", propNode, parserContext)); iterator.remove(); } else if (propName.equals("store_term_vector_offsets")) { - builder.storeTermVectorOffsets(nodeBooleanValue(propNode, parserContext)); + builder.storeTermVectorOffsets(nodeBooleanValue("store_term_vector_offsets", propNode, parserContext)); iterator.remove(); } else if (propName.equals("store_term_vector_positions")) { - builder.storeTermVectorPositions(nodeBooleanValue(propNode, parserContext)); + builder.storeTermVectorPositions(nodeBooleanValue("store_term_vector_positions", propNode, parserContext)); iterator.remove(); } else if (propName.equals("store_term_vector_payloads")) { - builder.storeTermVectorPayloads(nodeBooleanValue(propNode, parserContext)); + builder.storeTermVectorPayloads(nodeBooleanValue("store_term_vector_payloads", propNode, parserContext)); iterator.remove(); } else if (propName.equals("analyzer")) { NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString()); @@ -174,6 +181,37 @@ public class TypeParsers { } } + public static boolean parseNorms(FieldMapper.Builder builder, String propName, Object propNode, Mapper.TypeParser.ParserContext parserContext) { + if (propName.equals("norms")) { + if (propNode instanceof Map) { + final Map properties = nodeMapValue(propNode, "norms"); + for (Iterator> propsIterator = properties.entrySet().iterator(); propsIterator.hasNext();) { + Entry entry2 = propsIterator.next(); + final String propName2 = Strings.toUnderscoreCase(entry2.getKey()); + final Object propNode2 = entry2.getValue(); + if (propName2.equals("enabled")) { + builder.omitNorms(!lenientNodeBooleanValue(propNode2)); + propsIterator.remove(); + } else if (propName2.equals("loading")) { + // ignore for bw compat + propsIterator.remove(); + } + } + DocumentMapperParser.checkNoRemainingFields(propName, properties, parserContext.indexVersionCreated()); + DEPRECATION_LOGGER.deprecated("The [norms{enabled:true/false}] way of specifying norms is deprecated, please use [norms:true/false] instead"); + } else { + builder.omitNorms(nodeBooleanValue("norms", propNode, parserContext) == false); + } + return true; + } else if (propName.equals("omit_norms")) { + builder.omitNorms(nodeBooleanValue("norms", propNode, parserContext)); + DEPRECATION_LOGGER.deprecated("[omit_norms] is deprecated, please use [norms] instead with the opposite boolean value"); + return true; + } else { + return false; + } + } + /** * Parse text field attributes. In addition to {@link #parseField common attributes} * this will parse analysis and term-vectors related settings. 
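(parseNorms above folds both legacy spellings into the new boolean norms parameter: the object form norms: {enabled: x} collapses to x, and omit_norms: x becomes norms with the opposite value. A tiny sketch of just that value mapping; the helper is hypothetical, not the real FieldMapper.Builder API, and the default-to-enabled fallback is an assumption for illustration.)

    import java.util.Map;

    class NormsUpgradeSketch {
        /** Map the legacy norms spellings onto the new boolean "norms" value. */
        static boolean normsEnabled(Object normsNode, Object omitNormsNode) {
            if (normsNode instanceof Map) {
                // legacy object form: norms: { enabled: true/false, loading: ... }
                normsNode = ((Map<?, ?>) normsNode).get("enabled");
            }
            if (normsNode != null) {
                return Boolean.parseBoolean(normsNode.toString());
            }
            if (omitNormsNode != null) {
                // omit_norms carries the opposite meaning of norms
                return Boolean.parseBoolean(omitNormsNode.toString()) == false;
            }
            return true; // neither spelling present: leave norms enabled
        }
    }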
@@ -181,6 +219,14 @@ public class TypeParsers { public static void parseTextField(FieldMapper.Builder builder, String name, Map fieldNode, Mapper.TypeParser.ParserContext parserContext) { parseField(builder, name, fieldNode, parserContext); parseAnalyzersAndTermVectors(builder, name, fieldNode, parserContext); + for (Iterator> iterator = fieldNode.entrySet().iterator(); iterator.hasNext();) { + Map.Entry entry = iterator.next(); + final String propName = Strings.toUnderscoreCase(entry.getKey()); + final Object propNode = entry.getValue(); + if (parseNorms(builder, propName, propNode, parserContext)) { + iterator.remove(); + } + } } /** @@ -199,43 +245,30 @@ public class TypeParsers { builder.index(parseIndex(name, propNode.toString(), parserContext)); iterator.remove(); } else if (propName.equals(DOC_VALUES)) { - builder.docValues(nodeBooleanValue(propNode, parserContext)); + builder.docValues(nodeBooleanValue(DOC_VALUES, propNode, parserContext)); iterator.remove(); } else if (propName.equals("boost")) { builder.boost(nodeFloatValue(propNode)); iterator.remove(); - } else if (propName.equals("omit_norms")) { - builder.omitNorms(nodeBooleanValue(propNode, parserContext)); - iterator.remove(); - } else if (propName.equals("norms")) { - final Map properties = nodeMapValue(propNode, "norms"); - for (Iterator> propsIterator = properties.entrySet().iterator(); propsIterator.hasNext();) { - Entry entry2 = propsIterator.next(); - final String propName2 = Strings.toUnderscoreCase(entry2.getKey()); - final Object propNode2 = entry2.getValue(); - if (propName2.equals("enabled")) { - builder.omitNorms(!lenientNodeBooleanValue(propNode2)); - propsIterator.remove(); - } else if (propName2.equals(Loading.KEY)) { - builder.normsLoading(Loading.parse(nodeStringValue(propNode2, null), null)); - propsIterator.remove(); - } - } - DocumentMapperParser.checkNoRemainingFields(propName, properties, parserContext.indexVersionCreated()); + } else if (parserContext.indexVersionCreated().before(Version.V_5_0_0_alpha1) + && parseNorms(builder, propName, propNode, parserContext)) { iterator.remove(); } else if (propName.equals("index_options")) { builder.indexOptions(nodeIndexOptionValue(propNode)); iterator.remove(); } else if (propName.equals("include_in_all")) { - builder.includeInAll(nodeBooleanValue(propNode, parserContext)); + builder.includeInAll(nodeBooleanValue("include_in_all", propNode, parserContext)); iterator.remove(); } else if (propName.equals("similarity")) { SimilarityProvider similarityProvider = resolveSimilarity(parserContext, name, propNode.toString()); builder.similarity(similarityProvider); iterator.remove(); - } else if (propName.equals("fielddata")) { - final Settings settings = Settings.builder().put(SettingsLoader.Helper.loadNestedFromMap(nodeMapValue(propNode, "fielddata"))).build(); - builder.fieldDataSettings(settings); + } else if (propName.equals("fielddata") + && propNode instanceof Map + && parserContext.indexVersionCreated().before(Version.V_5_0_0_alpha1)) { + // ignore for bw compat + iterator.remove(); + } else if (parseMultiField(builder, name, parserContext, propName, propNode)) { iterator.remove(); } else if (propName.equals("copy_to")) { if (parserContext.isWithinMultiField()) { @@ -243,7 +276,7 @@ public class TypeParsers { (indexVersionCreated.after(Version.V_2_0_1) && indexVersionCreated.before(Version.V_2_1_0))) { throw new MapperParsingException("copy_to in multi fields is not allowed. 
Found the copy_to in field [" + name + "] which is within a multi field."); } else { - ESLoggerFactory.getLogger("mapping [" + parserContext.type() + "]").warn("Found a copy_to in field [" + name + "] which is within a multi field. This feature has been removed and the copy_to will be removed from the mapping."); + ESLoggerFactory.getLogger("mapping [" + parserContext.type() + "]").warn("Found a copy_to in field [{}] which is within a multi field. This feature has been removed and the copy_to will be removed from the mapping.", name); } } else { parseCopyFields(propNode, builder); @@ -353,35 +386,32 @@ public class TypeParsers { } public static boolean parseIndex(String fieldName, String index, Mapper.TypeParser.ParserContext parserContext) throws MapperParsingException { - if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0)) { - switch (index) { - case "true": - return true; - case "false": - return false; - default: + switch (index) { + case "true": + return true; + case "false": + return false; + case "not_analyzed": + case "analyzed": + case "no": + if (parserContext.parseFieldMatcher().isStrict() == false) { + DEPRECATION_LOGGER.deprecated("Expected a boolean for property [index] but got [{}]", index); + return "no".equals(index) == false; + } else { throw new IllegalArgumentException("Can't parse [index] value [" + index + "] for field [" + fieldName + "], expected [true] or [false]"); } - } else { - final String normalizedIndex = Strings.toUnderscoreCase(index); - switch (normalizedIndex) { - case "true": - case "not_analyzed": - case "analyzed": - return true; - case "false": - case "no": - return false; - default: - throw new IllegalArgumentException("Can't parse [index] value [" + index + "] for field [" + fieldName + "], expected [true], [false], [no], [not_analyzed] or [analyzed]"); - } + default: + throw new IllegalArgumentException("Can't parse [index] value [" + index + "] for field [" + fieldName + "], expected [true] or [false]"); } } public static boolean parseStore(String fieldName, String store, Mapper.TypeParser.ParserContext parserContext) throws MapperParsingException { - if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0)) { + if (parserContext.parseFieldMatcher().isStrict()) { return XContentMapValues.nodeBooleanValue(store); } else { + if (BOOLEAN_STRINGS.contains(store) == false) { + DEPRECATION_LOGGER.deprecated("Expected a boolean for property [store] but got [{}]", store); + } if ("no".equals(store)) { return false; } else if ("yes".equals(store)) { @@ -406,7 +436,7 @@ public class TypeParsers { } private static SimilarityProvider resolveSimilarity(Mapper.TypeParser.ParserContext parserContext, String name, String value) { - if (parserContext.indexVersionCreated().before(Version.V_5_0_0) && "default".equals(value)) { + if (parserContext.indexVersionCreated().before(Version.V_5_0_0_alpha1) && "default".equals(value)) { // "default" similarity has been renamed into "classic" in 3.x. 
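The rewritten parseIndex and parseStore above share the same bw-compat pattern: under strict parsing only true/false are accepted, while the lenient path logs a deprecation warning and maps the pre-5.0 string values onto the new boolean ("analyzed" and "not_analyzed" both meant the field was indexed; only "no" disabled indexing). A standalone sketch of that mapping, assuming a plain boolean flag in place of ParseFieldMatcher and System.err in place of the DeprecationLogger:

    final class IndexFlagCompatSketch {
        static boolean parseIndexFlag(String fieldName, String index, boolean strict) {
            switch (index) {
                case "true":
                    return true;
                case "false":
                    return false;
                case "analyzed":
                case "not_analyzed":
                case "no":
                    if (strict == false) {
                        System.err.printf("deprecation: expected a boolean for [index] but got [%s]%n", index);
                        // "analyzed" and "not_analyzed" both meant indexed; only "no" turned indexing off
                        return "no".equals(index) == false;
                    }
                    // in strict mode fall through to the rejection below
                default:
                    throw new IllegalArgumentException("Can't parse [index] value [" + index
                        + "] for field [" + fieldName + "], expected [true] or [false]");
            }
        }
    }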
value = SimilarityService.DEFAULT_SIMILARITY; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java index 1a1c1592d7e..fec0720ce70 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java @@ -28,10 +28,14 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.plain.AbstractGeoPointDVIndexFieldData; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; @@ -56,13 +60,13 @@ import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField; */ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements ArrayValueMapperParser { public static final String CONTENT_TYPE = "geo_point"; + protected static final DeprecationLogger deprecationLogger = new DeprecationLogger(Loggers.getLogger(BaseGeoPointFieldMapper.class)); public static class Names { public static final String LAT = "lat"; public static final String LAT_SUFFIX = "." + LAT; public static final String LON = "lon"; public static final String LON_SUFFIX = "." + LON; public static final String GEOHASH = "geohash"; - public static final String GEOHASH_SUFFIX = "." 
+ GEOHASH; public static final String IGNORE_MALFORMED = "ignore_malformed"; } @@ -71,7 +75,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr public static final boolean ENABLE_GEOHASH = false; public static final boolean ENABLE_GEOHASH_PREFIX = false; public static final int GEO_HASH_PRECISION = GeoHashUtils.PRECISION; - public static final Explicit<Boolean> IGNORE_MALFORMED = new Explicit(false, false); + public static final Explicit<Boolean> IGNORE_MALFORMED = new Explicit<>(false, false); } public abstract static class Builder<T extends Builder, Y extends BaseGeoPointFieldMapper> extends FieldMapper.Builder<T, Y> { @@ -97,12 +101,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr return (GeoPointFieldType)fieldType; } - @Override - public T fieldDataSettings(Settings settings) { - this.fieldDataSettings = settings; - return builder; - } - public T enableLatLon(boolean enableLatLon) { this.enableLatLon = enableLatLon; return builder; @@ -194,9 +192,13 @@ public abstract static class BaseGeoPointFieldMapper extends FieldMapper implements Arr String propName = Strings.toUnderscoreCase(entry.getKey()); Object propNode = entry.getValue(); if (propName.equals("lat_lon")) { + deprecationLogger.deprecated(CONTENT_TYPE + " lat_lon parameter is deprecated and will be removed " + + "in the next major release"); builder.enableLatLon(XContentMapValues.lenientNodeBooleanValue(propNode)); iterator.remove(); } else if (propName.equals("precision_step")) { + deprecationLogger.deprecated(CONTENT_TYPE + " precision_step parameter is deprecated and will be removed " + + "in the next major release"); builder.precisionStep(XContentMapValues.nodeIntegerValue(propNode)); iterator.remove(); } else if (propName.equals("geohash")) { @@ -218,8 +220,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr } else if (propName.equals(Names.IGNORE_MALFORMED)) { builder.ignoreMalformed(XContentMapValues.lenientNodeBooleanValue(propNode)); iterator.remove(); - } else if (parseMultiField(builder, name, parserContext, propName, propNode)) { - iterator.remove(); } } @@ -339,6 +339,11 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr this.latFieldType = latFieldType; this.lonFieldType = lonFieldType; } + + @Override + public IndexFieldData.Builder fielddataBuilder() { + return new AbstractGeoPointDVIndexFieldData.Builder(); + } } protected DoubleFieldMapper latMapper; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java index 57778fa8d25..1a0f2561a40 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java @@ -46,7 +46,6 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.ParseContext; -import org.elasticsearch.index.mapper.core.DoubleFieldMapper; import java.io.IOException; import java.util.Iterator; @@ -454,7 +453,7 @@ public class GeoShapeFieldMapper extends FieldMapper { } for (Field field : fields) { if (!customBoost() && - fieldType.boost() != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) { + fieldType.boost() != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha1)) { field.setBoost(fieldType().boost()); }
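A pattern worth calling out because it repeats across this patch (geo_point here, then _index, _parent, _uid and ip further down): the per-field FieldDataType settings object is removed, and each field type instead exposes a fielddataBuilder() factory that returns the fielddata implementation matching how the field is stored. A toy model of that shape, with every name invented for illustration:

    final class FielddataSketch {
        interface FielddataBuilder {
            Object build(String fieldName);
        }

        abstract static class FieldTypeSketch {
            final String name;
            FieldTypeSketch(String name) {
                this.name = name;
            }
            // Default behavior: a field type opts out of fielddata unless it overrides this.
            FielddataBuilder fielddataBuilder() {
                throw new IllegalArgumentException("Fielddata is not supported on field [" + name + "]");
            }
        }

        static final class GeoPointFieldTypeSketch extends FieldTypeSketch {
            GeoPointFieldTypeSketch(String name) {
                super(name);
            }
            @Override
            FielddataBuilder fielddataBuilder() {
                // geo_point is backed by doc values, so hand back a doc-values based implementation
                return fieldName -> "doc-values fielddata for " + fieldName;
            }
        }
    }

Pushing the decision into the field type itself is what allows the parser hunks above to delete the fieldDataSettings plumbing.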
context.doc().add(field); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java index 97c2fa3933b..bc523383208 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java @@ -24,7 +24,6 @@ import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; import org.apache.lucene.search.Query; -import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lucene.Lucene; @@ -32,7 +31,6 @@ import org.elasticsearch.common.lucene.all.AllField; import org.elasticsearch.common.lucene.all.AllTermQuery; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; @@ -168,7 +166,6 @@ public class AllFieldMapper extends MetadataFieldMapper { static final class AllFieldType extends MappedFieldType { public AllFieldType() { - setFieldDataType(new FieldDataType("string")); } protected AllFieldType(AllFieldType ref) { @@ -305,7 +302,7 @@ public class AllFieldMapper extends MetadataFieldMapper { builder.field("store_term_vector_payloads", fieldType().storeTermVectorPayloads()); } if (includeDefaults || fieldType().omitNorms() != Defaults.FIELD_TYPE.omitNorms()) { - builder.field("omit_norms", fieldType().omitNorms()); + builder.field("norms", !fieldType().omitNorms()); } doXContentAnalyzers(builder, includeDefaults); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java index 03ebcb9fe95..77dbe358265 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java @@ -22,16 +22,17 @@ package org.elasticsearch.index.mapper.internal; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.search.Query; import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; import java.util.ArrayList; @@ -130,7 +131,6 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper { private boolean enabled = Defaults.ENABLED; public FieldNamesFieldType() { - setFieldDataType(new FieldDataType("string")); } protected FieldNamesFieldType(FieldNamesFieldType ref) { @@ -192,6 +192,14 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper { public boolean 
useTermQueryWithQueryString() { return true; } + + @Override + public Query termQuery(Object value, QueryShardContext context) { + if (isEnabled() == false) { + throw new IllegalStateException("Cannot run [exists] queries if the [_field_names] field is disabled"); + } + return super.termQuery(value, context); + } } private FieldNamesFieldMapper(Settings indexSettings, MappedFieldType existing) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java index 1f26dd60841..2a21e8a7c69 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java @@ -38,7 +38,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; @@ -112,7 +111,6 @@ public class IdFieldMapper extends MetadataFieldMapper { static final class IdFieldType extends MappedFieldType { public IdFieldType() { - setFieldDataType(new FieldDataType("string")); } protected IdFieldType(IdFieldType ref) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java index d4aa2da4ab3..cb3e4ad2193 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java @@ -29,6 +29,8 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.plain.IndexIndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; @@ -184,6 +186,11 @@ public class IndexFieldMapper extends MetadataFieldMapper { } return value.toString(); } + + @Override + public IndexFieldData.Builder fielddataBuilder() { + return new IndexIndexFieldData.Builder(); + } } private EnabledAttributeMapper enabledState; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java index 66e754e5fda..aedafc80209 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java @@ -30,11 +30,14 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.loader.SettingsLoader; import org.elasticsearch.common.xcontent.XContentBuilder; -import 
org.elasticsearch.index.fielddata.FieldDataType; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; @@ -53,7 +56,6 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeMapValue; /** @@ -94,6 +96,11 @@ public class ParentFieldMapper extends MetadataFieldMapper { return builder; } + public Builder eagerGlobalOrdinals(boolean eagerGlobalOrdinals) { + ((ParentFieldType) fieldType()).setEagerGlobalOrdinals(eagerGlobalOrdinals); + return builder; + } + @Override public ParentFieldMapper build(BuilderContext context) { if (parentType == null) { @@ -106,6 +113,7 @@ } public static class TypeParser implements MetadataFieldMapper.TypeParser { + private static final ParseField FIELDDATA = new ParseField("fielddata").withAllDeprecated("eager_global_ordinals"); @Override public MetadataFieldMapper.Builder<?,?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException { Builder builder = new Builder(parserContext.type()); @@ -116,14 +124,16 @@ if (fieldName.equals("type")) { builder.type(fieldNode.toString()); iterator.remove(); - } else if (fieldName.equals("fielddata")) { - // Only take over `loading`, since that is the only option now that is configurable: + } else if (parserContext.parseFieldMatcher().match(fieldName, FIELDDATA)) { + // for bw compat only Map<String, String> fieldDataSettings = SettingsLoader.Helper.loadNestedFromMap(nodeMapValue(fieldNode, "fielddata")); - if (fieldDataSettings.containsKey(MappedFieldType.Loading.KEY)) { - Settings settings = settingsBuilder().put(MappedFieldType.Loading.KEY, fieldDataSettings.get(MappedFieldType.Loading.KEY)).build(); - builder.fieldDataSettings(settings); + if (fieldDataSettings.containsKey("loading")) { + builder.eagerGlobalOrdinals("eager_global_ordinals".equals(fieldDataSettings.get("loading"))); } iterator.remove(); + } else if (fieldName.equals("eager_global_ordinals")) { + builder.eagerGlobalOrdinals(XContentMapValues.nodeBooleanValue(fieldNode)); + iterator.remove(); } } return builder; @@ -143,7 +153,6 @@ parentJoinField.indexOptions(IndexOptions.NONE); parentJoinField.docValues(true); parentJoinField.fieldType().setDocValuesType(DocValuesType.SORTED); - parentJoinField.fieldType().setFieldDataType(null); return parentJoinField.build(context); } @@ -152,8 +161,8 @@ final String documentType; public ParentFieldType() { - setFieldDataType(new FieldDataType(NAME, settingsBuilder().put(MappedFieldType.Loading.KEY, Loading.EAGER_VALUE))); documentType = null; + setEagerGlobalOrdinals(true); } ParentFieldType(ParentFieldType ref, String documentType) { @@ -200,6 +209,11 @@ query.add(new TermQuery(new Term(TypeFieldMapper.NAME, documentType)), BooleanClause.Occur.FILTER); return query.build(); } + + @Override + public IndexFieldData.Builder fielddataBuilder() { + return
new ParentChildIndexFieldData.Builder(); + } } private final String parentType; @@ -288,17 +302,13 @@ public class ParentFieldMapper extends MetadataFieldMapper { builder.startObject(CONTENT_TYPE); builder.field("type", parentType); - if (includeDefaults || joinFieldHasCustomFieldDataSettings()) { - builder.field("fielddata", (Map) fieldType().fieldDataType().getSettings().getAsMap()); + if (includeDefaults || fieldType().eagerGlobalOrdinals() != defaultFieldType.eagerGlobalOrdinals()) { + builder.field("eager_global_ordinals", fieldType().eagerGlobalOrdinals()); } builder.endObject(); return builder; } - private boolean joinFieldHasCustomFieldDataSettings() { - return fieldType != null && fieldType.fieldDataType() != null && fieldType.fieldDataType().equals(Defaults.FIELD_TYPE.fieldDataType()) == false; - } - @Override protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { super.doMerge(mergeWith, updateAllTypes); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java index b1d24e53ab5..4954445da66 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; @@ -111,7 +110,6 @@ public class RoutingFieldMapper extends MetadataFieldMapper { static final class RoutingFieldType extends MappedFieldType { public RoutingFieldType() { - setFieldDataType(new FieldDataType("string")); } protected RoutingFieldType(RoutingFieldType ref) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java index a102f03a5f1..3eeca0352a7 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java @@ -124,7 +124,7 @@ public class SourceFieldMapper extends MetadataFieldMapper { if (fieldName.equals("enabled")) { builder.enabled(lenientNodeBooleanValue(fieldNode)); iterator.remove(); - } else if ("format".equals(fieldName) && parserContext.indexVersionCreated().before(Version.V_5_0_0)) { + } else if ("format".equals(fieldName) && parserContext.indexVersionCreated().before(Version.V_5_0_0_alpha1)) { // ignore on old indices, reject on and after 5.0 iterator.remove(); } else if (fieldName.equals("includes")) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java index 7c51b05cb4b..fc1e2a6a314 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.mapper.internal; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; -import org.elasticsearch.common.Nullable; import 
org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -95,7 +94,7 @@ public class TTLFieldMapper extends MetadataFieldMapper { public TTLFieldMapper build(BuilderContext context) { setupFieldType(context); fieldType.setHasDocValues(false); - return new TTLFieldMapper(fieldType, enabledState, defaultTTL, fieldDataSettings, context.indexSettings()); + return new TTLFieldMapper(fieldType, enabledState, defaultTTL, context.indexSettings()); } } @@ -161,11 +160,11 @@ public class TTLFieldMapper extends MetadataFieldMapper { private long defaultTTL; private TTLFieldMapper(Settings indexSettings) { - this(Defaults.TTL_FIELD_TYPE.clone(), Defaults.ENABLED_STATE, Defaults.DEFAULT, null, indexSettings); + this(Defaults.TTL_FIELD_TYPE.clone(), Defaults.ENABLED_STATE, Defaults.DEFAULT, indexSettings); } private TTLFieldMapper(MappedFieldType fieldType, EnabledAttributeMapper enabled, long defaultTTL, - @Nullable Settings fieldDataSettings, Settings indexSettings) { + Settings indexSettings) { super(NAME, fieldType, Defaults.TTL_FIELD_TYPE, indexSettings); this.enabledState = enabled; this.defaultTTL = defaultTTL; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java index 72defadf6fd..b9e3434fc25 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java @@ -34,7 +34,6 @@ import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; @@ -102,7 +101,6 @@ public class TypeFieldMapper extends MetadataFieldMapper { static final class TypeFieldType extends MappedFieldType { public TypeFieldType() { - setFieldDataType(new FieldDataType("string")); } protected TypeFieldType(TypeFieldType ref) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java index f8fea4071e5..84aaedefbec 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java @@ -25,25 +25,24 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.fielddata.FieldDataType; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import 
org.elasticsearch.index.mapper.ParseContext.Document; +import org.elasticsearch.index.mapper.core.TextFieldMapper; import org.elasticsearch.index.mapper.Uid; import java.io.IOException; import java.util.List; import java.util.Map; -import static org.elasticsearch.index.mapper.core.TypeParsers.parseField; - /** * */ @@ -104,7 +103,6 @@ public class UidFieldMapper extends MetadataFieldMapper { static final class UidFieldType extends MappedFieldType { public UidFieldType() { - setFieldDataType(new FieldDataType("string")); } protected UidFieldType(UidFieldType ref) { @@ -128,6 +126,15 @@ public class UidFieldMapper extends MetadataFieldMapper { } return Uid.createUid(value.toString()); } + + @Override + public IndexFieldData.Builder fielddataBuilder() { + // TODO: add doc values support? + return new PagedBytesIndexFieldData.Builder( + TextFieldMapper.Defaults.FIELDDATA_MIN_FREQUENCY, + TextFieldMapper.Defaults.FIELDDATA_MAX_FREQUENCY, + TextFieldMapper.Defaults.FIELDDATA_MIN_SEGMENT_SIZE); + } } private UidFieldMapper(Settings indexSettings, MappedFieldType existing) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java index 027b2ef05ff..d019c4768b1 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java @@ -24,7 +24,6 @@ import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.index.DocValuesType; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; @@ -82,7 +81,6 @@ public class VersionFieldMapper extends MetadataFieldMapper { static final class VersionFieldType extends MappedFieldType { public VersionFieldType() { - setFieldDataType(new FieldDataType("long")); } protected VersionFieldType(VersionFieldType ref) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java index 9a4cf70782b..b754c163669 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java @@ -22,12 +22,14 @@ package org.elasticsearch.index.mapper.ip; import org.apache.lucene.analysis.LegacyNumericTokenStream; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.Terms; import org.apache.lucene.search.LegacyNumericRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.LegacyNumericUtils; import org.elasticsearch.Version; +import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; @@ -41,7 +43,9 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.analysis.NumericAnalyzer; import org.elasticsearch.index.analysis.NumericTokenizer; -import org.elasticsearch.index.fielddata.FieldDataType; 
+import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; +import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; @@ -160,7 +164,6 @@ public class IpFieldMapper extends NumberFieldMapper { public static final class IpFieldType extends LongFieldMapper.LongFieldType { public IpFieldType() { - setFieldDataType(new FieldDataType("long")); } protected IpFieldType(IpFieldType ref) { @@ -262,6 +265,19 @@ public class IpFieldMapper extends NumberFieldMapper { iValue + iSim, true, true); } + + @Override + public FieldStats stats(Terms terms, int maxDoc) throws IOException { + long minValue = LegacyNumericUtils.getMinLong(terms); + long maxValue = LegacyNumericUtils.getMaxLong(terms); + return new FieldStats.Ip(maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue); + } + + @Override + public IndexFieldData.Builder fielddataBuilder() { + failIfNoDocValues(); + return new DocValuesIndexFieldData.Builder().numericType(NumericType.LONG); + } } protected IpFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, @@ -306,7 +322,7 @@ public class IpFieldMapper extends NumberFieldMapper { final long value = ipToLong(ipAsString); if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { CustomLongNumericField field = new CustomLongNumericField(value, fieldType()); - if (fieldType.boost() != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) { + if (fieldType.boost() != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0_alpha1)) { field.setBoost(fieldType().boost()); } fields.add(field); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java b/core/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java index 58602f06dfa..3deb487e50b 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java @@ -19,11 +19,15 @@ package org.elasticsearch.index.mapper.object; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.MapperParsingException; +import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -33,30 +37,52 @@ import java.util.TreeMap; /** * */ -public class DynamicTemplate { +public class DynamicTemplate implements ToXContent { public static enum MatchType { - SIMPLE, - REGEX; + SIMPLE { + @Override + public boolean matches(String pattern, String value) { + return Regex.simpleMatch(pattern, value); + } + @Override + public String toString() { + return "simple"; + } + }, + REGEX { + @Override + public boolean matches(String pattern, String value) { + return value.matches(pattern); + } + @Override + public String toString() { + return "regex"; + } + }; public static MatchType fromString(String value) { - if ("simple".equals(value)) { - return SIMPLE; - } else if ("regex".equals(value)) { - return REGEX; + for (MatchType v : 
values()) { + if (v.toString().equals(value)) { + return v; + } } throw new IllegalArgumentException("No matching pattern matched on [" + value + "]"); } + + /** Whether {@code value} matches {@code regex}. */ + public abstract boolean matches(String regex, String value); } - public static DynamicTemplate parse(String name, Map<String, Object> conf) throws MapperParsingException { + public static DynamicTemplate parse(String name, Map<String, Object> conf, + Version indexVersionCreated) throws MapperParsingException { String match = null; String pathMatch = null; String unmatch = null; String pathUnmatch = null; Map<String, Object> mapping = null; String matchMappingType = null; - String matchPattern = "simple"; + String matchPattern = MatchType.SIMPLE.toString(); for (Map.Entry<String, Object> entry : conf.entrySet()) { String propName = Strings.toUnderscoreCase(entry.getKey()); @@ -74,22 +100,18 @@ matchPattern = entry.getValue().toString(); } else if ("mapping".equals(propName)) { mapping = (Map<String, Object>) entry.getValue(); + } else if (indexVersionCreated.onOrAfter(Version.V_5_0_0_alpha1)) { + // unknown parameters were ignored before but still carried through serialization + // so we need to ignore them at parsing time for old indices + throw new IllegalArgumentException("Illegal dynamic template parameter: [" + propName + "]"); } } - if (match == null && pathMatch == null && matchMappingType == null) { - throw new MapperParsingException("template must have match, path_match or match_mapping_type set"); - } - if (mapping == null) { - throw new MapperParsingException("template must have mapping set"); - } - return new DynamicTemplate(name, conf, pathMatch, pathUnmatch, match, unmatch, matchMappingType, MatchType.fromString(matchPattern), mapping); + return new DynamicTemplate(name, pathMatch, pathUnmatch, match, unmatch, matchMappingType, MatchType.fromString(matchPattern), mapping); } private final String name; - private final Map<String, Object> conf; - private final String pathMatch; private final String pathUnmatch; @@ -104,9 +126,14 @@ private final Map<String, Object> mapping; - public DynamicTemplate(String name, Map<String, Object> conf, String pathMatch, String pathUnmatch, String match, String unmatch, String matchMappingType, MatchType matchType, Map<String, Object> mapping) { + public DynamicTemplate(String name, String pathMatch, String pathUnmatch, String match, String unmatch, String matchMappingType, MatchType matchType, Map<String, Object> mapping) { + if (match == null && pathMatch == null && matchMappingType == null) { + throw new MapperParsingException("template must have match, path_match or match_mapping_type set"); + } + if (mapping == null) { + throw new MapperParsingException("template must have mapping set"); + } this.name = name; - this.conf = new TreeMap<>(conf); this.pathMatch = pathMatch; this.pathUnmatch = pathUnmatch; this.match = match; @@ -120,48 +147,55 @@ return this.name; } - public Map<String, Object> conf() { - return this.conf; - } - public boolean match(ContentPath path, String name, String dynamicType) { - if (pathMatch != null && !patternMatch(pathMatch, path.pathAsText(name))) { + if (pathMatch != null && !matchType.matches(pathMatch, path.pathAsText(name))) { return false; } - if (match != null && !patternMatch(match, name)) { + if (match != null && !matchType.matches(match, name)) { return false; } - if (pathUnmatch != null && patternMatch(pathUnmatch, path.pathAsText(name))) { + if (pathUnmatch != null && matchType.matches(pathUnmatch, path.pathAsText(name))) { return false; } - if (unmatch != null &&
patternMatch(unmatch, name)) { + if (unmatch != null && matchType.matches(unmatch, name)) { return false; } if (matchMappingType != null) { if (dynamicType == null) { return false; } - if (!patternMatch(matchMappingType, dynamicType)) { + if (!matchType.matches(matchMappingType, dynamicType)) { return false; } } return true; } - public boolean hasType() { - return mapping.containsKey("type"); - } public String mappingType(String dynamicType) { - return mapping.containsKey("type") ? mapping.get("type").toString().replace("{dynamic_type}", dynamicType).replace("{dynamicType}", dynamicType) : dynamicType; - } - - private boolean patternMatch(String pattern, String str) { - if (matchType == MatchType.SIMPLE) { - return Regex.simpleMatch(pattern, str); + String type; + if (mapping.containsKey("type")) { + type = mapping.get("type").toString(); + type = type.replace("{dynamic_type}", dynamicType); + type = type.replace("{dynamicType}", dynamicType); + } else { + type = dynamicType; } - return str.matches(pattern); - } + if (type.equals(mapping.get("type")) == false // either the type was not set, or we updated it through replacements + && "text".equals(type)) { // and the result is "text" + // now that string has been split into text and keyword, we use text for + // dynamic mappings. However before it used to be possible to index as a keyword + // by setting index=not_analyzed, so for now we will use a keyword field rather + // than a text field if index=not_analyzed and the field type was not specified + // explicitly + // TODO: remove this in 6.0 + // TODO: how to do it in the future? + final Object index = mapping.get("index"); + if ("not_analyzed".equals(index) || "no".equals(index)) { + type = "keyword"; + } + } + return type; + } public Map<String, Object> mappingForName(String name, String dynamicType) { return processMap(mapping, name, dynamicType); @@ -200,40 +234,29 @@ } @Override - public boolean equals(Object o) { - if (this == o) { - return true; + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (match != null) { + builder.field("match", match); } - if (o == null || getClass() != o.getClass()) { - return false; + if (pathMatch != null) { + builder.field("path_match", pathMatch); } - - DynamicTemplate that = (DynamicTemplate) o; - - // check if same matching, if so, replace the mapping - if (match != null ? !match.equals(that.match) : that.match != null) { - return false; + if (unmatch != null) { + builder.field("unmatch", unmatch); } - if (matchMappingType != null ? !matchMappingType.equals(that.matchMappingType) : that.matchMappingType != null) { - return false; + if (pathUnmatch != null) { + builder.field("path_unmatch", pathUnmatch); } - if (matchType != that.matchType) { - return false; + if (matchMappingType != null) { + builder.field("match_mapping_type", matchMappingType); } - if (unmatch != null ? !unmatch.equals(that.unmatch) : that.unmatch != null) { - return false; + if (matchType != MatchType.SIMPLE) { + builder.field("match_pattern", matchType); } - - return true; - } - - @Override - public int hashCode() { - // check if same matching, if so, replace the mapping - int result = match != null ? match.hashCode() : 0; - result = 31 * result + (unmatch != null ? unmatch.hashCode() : 0); - result = 31 * result + (matchType != null ? matchType.hashCode() : 0); - result = 31 * result + (matchMappingType != null ?
matchMappingType.hashCode() : 0); - return result; + // use a sorted map for consistent serialization + builder.field("mapping", new TreeMap<>(mapping)); + builder.endObject(); + return builder; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java index 00de61acdb6..7e5dc3d28f5 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.mapper.object; +import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.joda.FormatDateTimeFormatter; @@ -140,14 +141,15 @@ String fieldName = Strings.toUnderscoreCase(entry.getKey()); Object fieldNode = entry.getValue(); if (parseObjectOrDocumentTypeProperties(fieldName, fieldNode, parserContext, builder) - || processField(builder, fieldName, fieldNode)) { + || processField(builder, fieldName, fieldNode, parserContext.indexVersionCreated())) { iterator.remove(); } } return builder; } - protected boolean processField(ObjectMapper.Builder builder, String fieldName, Object fieldNode) { + protected boolean processField(ObjectMapper.Builder builder, String fieldName, Object fieldNode, + Version indexVersionCreated) { if (fieldName.equals("date_formats") || fieldName.equals("dynamic_date_formats")) { List<FormatDateTimeFormatter> dateTimeFormatters = new ArrayList<>(); if (fieldNode instanceof List) { @@ -185,7 +187,10 @@ throw new MapperParsingException("A dynamic template must be defined with a name"); } Map.Entry<String, Object> entry = tmpl.entrySet().iterator().next(); - ((Builder) builder).add(DynamicTemplate.parse(entry.getKey(), (Map<String, Object>) entry.getValue())); + String templateName = entry.getKey(); + Map<String, Object> templateParams = (Map<String, Object>) entry.getValue(); + DynamicTemplate template = DynamicTemplate.parse(templateName, templateParams, indexVersionCreated); + ((Builder) builder).add(template); } return true; } else if (fieldName.equals("date_detection")) { @@ -329,8 +334,7 @@ builder.startArray("dynamic_templates"); for (DynamicTemplate dynamicTemplate : dynamicTemplates) { builder.startObject(); - builder.field(dynamicTemplate.name()); - builder.map(dynamicTemplate.conf()); + builder.field(dynamicTemplate.name(), dynamicTemplate); builder.endObject(); } builder.endArray(); diff --git a/core/src/main/java/org/elasticsearch/index/percolator/ExtractQueryTermsService.java b/core/src/main/java/org/elasticsearch/index/percolator/ExtractQueryTermsService.java index 8ee9d55c312..3218837261b 100644 --- a/core/src/main/java/org/elasticsearch/index/percolator/ExtractQueryTermsService.java +++ b/core/src/main/java/org/elasticsearch/index/percolator/ExtractQueryTermsService.java @@ -23,9 +23,12 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.index.Fields; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.PrefixCodedTerms; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.queries.BlendedTermQuery; +import org.apache.lucene.queries.CommonTermsQuery; import org.apache.lucene.queries.TermsQuery; import
org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; @@ -92,10 +95,17 @@ public final class ExtractQueryTermsService { * an UnsupportedQueryException is thrown. */ static Set<Term> extractQueryTerms(Query query) { - // TODO: add support for the TermsQuery when it has methods to access the actual terms it encapsulates // TODO: add support for span queries if (query instanceof TermQuery) { return Collections.singleton(((TermQuery) query).getTerm()); + } else if (query instanceof TermsQuery) { + Set<Term> terms = new HashSet<>(); + TermsQuery termsQuery = (TermsQuery) query; + PrefixCodedTerms.TermIterator iterator = termsQuery.getTermData().iterator(); + for (BytesRef term = iterator.next(); term != null; term = iterator.next()) { + terms.add(new Term(iterator.field(), term)); + } + return terms; } else if (query instanceof PhraseQuery) { Term[] terms = ((PhraseQuery) query).getTerms(); if (terms.length == 0) { @@ -154,6 +164,12 @@ } else if (query instanceof BoostQuery) { Query wrappedQuery = ((BoostQuery) query).getQuery(); return extractQueryTerms(wrappedQuery); + } else if (query instanceof CommonTermsQuery) { + List<Term> terms = ((CommonTermsQuery) query).getTerms(); + return new HashSet<>(terms); + } else if (query instanceof BlendedTermQuery) { + List<Term> terms = ((BlendedTermQuery) query).getTerms(); + return new HashSet<>(terms); } else { throw new UnsupportedQueryException(query); } diff --git a/core/src/main/java/org/elasticsearch/index/percolator/PercolateStats.java b/core/src/main/java/org/elasticsearch/index/percolator/PercolateStats.java deleted file mode 100644 index f4c899dff9a..00000000000 --- a/core/src/main/java/org/elasticsearch/index/percolator/PercolateStats.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.index.percolator; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentBuilderString; -
-import java.io.IOException; -
-/** - * Exposes percolator related statistics. - */ -public class PercolateStats implements Streamable, ToXContent { -
- private long percolateCount; - private long percolateTimeInMillis; - private long current; - private long memorySizeInBytes = -1; - private long numQueries; -
- /** - * Noop constructor for serialization purposes.
- */ - public PercolateStats() { - } - - PercolateStats(long percolateCount, long percolateTimeInMillis, long current, long memorySizeInBytes, long numQueries) { - this.percolateCount = percolateCount; - this.percolateTimeInMillis = percolateTimeInMillis; - this.current = current; - this.memorySizeInBytes = memorySizeInBytes; - this.numQueries = numQueries; - } - - /** - * @return The number of times the percolate api has been invoked. - */ - public long getCount() { - return percolateCount; - } - - /** - * @return The total amount of time spend in the percolate api - */ - public long getTimeInMillis() { - return percolateTimeInMillis; - } - - /** - * @return The total amount of time spend in the percolate api - */ - public TimeValue getTime() { - return new TimeValue(getTimeInMillis()); - } - - /** - * @return The total amount of active percolate api invocations. - */ - public long getCurrent() { - return current; - } - - /** - * @return The total number of loaded percolate queries. - */ - public long getNumQueries() { - return numQueries; - } - - /** - * @return Temporarily returns -1, but this used to return the total size the loaded queries take in - * memory, but this is disabled now because the size estimation was too expensive cpu wise. This will be enabled - * again when a cheaper size estimation can be found. - */ - public long getMemorySizeInBytes() { - return memorySizeInBytes; - } - - /** - * @return The total size the loaded queries take in memory. - */ - public ByteSizeValue getMemorySize() { - return new ByteSizeValue(memorySizeInBytes); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(Fields.PERCOLATE); - builder.field(Fields.TOTAL, percolateCount); - builder.timeValueField(Fields.TIME_IN_MILLIS, Fields.TIME, percolateTimeInMillis); - builder.field(Fields.CURRENT, current); - builder.field(Fields.MEMORY_SIZE_IN_BYTES, memorySizeInBytes); - builder.field(Fields.MEMORY_SIZE, getMemorySize()); - builder.field(Fields.QUERIES, getNumQueries()); - builder.endObject(); - return builder; - } - - public void add(PercolateStats percolate) { - if (percolate == null) { - return; - } - - percolateCount += percolate.getCount(); - percolateTimeInMillis += percolate.getTimeInMillis(); - current += percolate.getCurrent(); - numQueries += percolate.getNumQueries(); - } - - static final class Fields { - static final XContentBuilderString PERCOLATE = new XContentBuilderString("percolate"); - static final XContentBuilderString TOTAL = new XContentBuilderString("total"); - static final XContentBuilderString TIME = new XContentBuilderString("time"); - static final XContentBuilderString TIME_IN_MILLIS = new XContentBuilderString("time_in_millis"); - static final XContentBuilderString CURRENT = new XContentBuilderString("current"); - static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes"); - static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size"); - static final XContentBuilderString QUERIES = new XContentBuilderString("queries"); - } - - public static PercolateStats readPercolateStats(StreamInput in) throws IOException { - PercolateStats stats = new PercolateStats(); - stats.readFrom(in); - return stats; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - percolateCount = in.readVLong(); - percolateTimeInMillis = in.readVLong(); - current = in.readVLong(); - numQueries = in.readVLong(); - } - - 
@Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(percolateCount); - out.writeVLong(percolateTimeInMillis); - out.writeVLong(current); - out.writeVLong(numQueries); - } -} diff --git a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorFieldMapper.java b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorFieldMapper.java index f44d454655e..338de5c333d 100644 --- a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorFieldMapper.java @@ -19,34 +19,50 @@ package org.elasticsearch.index.percolator; import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StoredField; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.search.Query; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.core.BinaryFieldMapper; import org.elasticsearch.index.mapper.core.KeywordFieldMapper; +import org.elasticsearch.index.mapper.core.StringFieldMapper; +import org.elasticsearch.index.query.ParsedQuery; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; public class PercolatorFieldMapper extends FieldMapper { + public static final String TYPE_NAME = ".percolator"; public static final String NAME = "query"; public static final String CONTENT_TYPE = "percolator"; public static final PercolatorFieldType FIELD_TYPE = new PercolatorFieldType(); private static final String EXTRACTED_TERMS_FIELD_NAME = "extracted_terms"; private static final String UNKNOWN_QUERY_FIELD_NAME = "unknown_query"; + private static final String QUERY_BUILDER_FIELD_NAME = "query_builder_field"; + public static final String EXTRACTED_TERMS_FULL_FIELD_NAME = NAME + "." + EXTRACTED_TERMS_FIELD_NAME; public static final String UNKNOWN_QUERY_FULL_FIELD_NAME = NAME + "." + UNKNOWN_QUERY_FIELD_NAME; + public static final String QUERY_BUILDER_FULL_FIELD_NAME = NAME + "." 
+ QUERY_BUILDER_FIELD_NAME; public static class Builder extends FieldMapper.Builder { @@ -60,19 +76,29 @@ public class PercolatorFieldMapper extends FieldMapper { @Override public PercolatorFieldMapper build(BuilderContext context) { context.path().add(name); - KeywordFieldMapper extractedTermsField = createStringFieldBuilder(EXTRACTED_TERMS_FIELD_NAME).build(context); - KeywordFieldMapper unknownQueryField = createStringFieldBuilder(UNKNOWN_QUERY_FIELD_NAME).build(context); + KeywordFieldMapper extractedTermsField = createExtractQueryFieldBuilder(EXTRACTED_TERMS_FIELD_NAME).build(context); + KeywordFieldMapper unknownQueryField = createExtractQueryFieldBuilder(UNKNOWN_QUERY_FIELD_NAME).build(context); + BinaryFieldMapper queryBuilderField = createQueryBuilderFieldBuilder().build(context); context.path().remove(); - return new PercolatorFieldMapper(name(), fieldType, defaultFieldType, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo, queryShardContext, extractedTermsField, unknownQueryField); + return new PercolatorFieldMapper(name(), fieldType, defaultFieldType, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo, queryShardContext, extractedTermsField, unknownQueryField, queryBuilderField); } - static KeywordFieldMapper.Builder createStringFieldBuilder(String name) { + static KeywordFieldMapper.Builder createExtractQueryFieldBuilder(String name) { KeywordFieldMapper.Builder queryMetaDataFieldBuilder = new KeywordFieldMapper.Builder(name); queryMetaDataFieldBuilder.docValues(false); queryMetaDataFieldBuilder.store(false); queryMetaDataFieldBuilder.indexOptions(IndexOptions.DOCS); return queryMetaDataFieldBuilder; } + + static BinaryFieldMapper.Builder createQueryBuilderFieldBuilder() { + BinaryFieldMapper.Builder builder = new BinaryFieldMapper.Builder(QUERY_BUILDER_FIELD_NAME); + builder.docValues(true); + builder.indexOptions(IndexOptions.NONE); + builder.store(false); + builder.fieldType().setDocValuesType(DocValuesType.BINARY); + return builder; + } } public static class TypeParser implements FieldMapper.TypeParser { @@ -111,26 +137,81 @@ public class PercolatorFieldMapper extends FieldMapper { private final QueryShardContext queryShardContext; private final KeywordFieldMapper queryTermsField; private final KeywordFieldMapper unknownQueryField; + private final BinaryFieldMapper queryBuilderField; - public PercolatorFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo, QueryShardContext queryShardContext, KeywordFieldMapper queryTermsField, KeywordFieldMapper unknownQueryField) { + public PercolatorFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, + Settings indexSettings, MultiFields multiFields, CopyTo copyTo, QueryShardContext queryShardContext, + KeywordFieldMapper queryTermsField, KeywordFieldMapper unknownQueryField, + BinaryFieldMapper queryBuilderField) { super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); this.queryShardContext = queryShardContext; this.queryTermsField = queryTermsField; this.unknownQueryField = unknownQueryField; - this.mapUnmappedFieldAsString = PercolatorQueriesRegistry.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING.get(indexSettings); + this.queryBuilderField = queryBuilderField; + this.mapUnmappedFieldAsString = PercolatorQueryCache.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING.get(indexSettings); } @Override public Mapper parse(ParseContext 
context) throws IOException {
        QueryShardContext queryShardContext = new QueryShardContext(this.queryShardContext);
-        Query query = PercolatorQueriesRegistry.parseQuery(queryShardContext, mapUnmappedFieldAsString, context.parser());
+        QueryBuilder queryBuilder = parseQueryBuilder(queryShardContext.parseContext(), context.parser());
+        // Fetching of terms, shapes and indexed scripts happens during this rewrite:
+        queryBuilder = queryBuilder.rewrite(queryShardContext);
+
+        try (XContentBuilder builder = XContentFactory.contentBuilder(PercolatorQueryCache.QUERY_BUILDER_CONTENT_TYPE)) {
+            queryBuilder.toXContent(builder, new MapParams(Collections.emptyMap()));
+            builder.flush();
+            byte[] queryBuilderAsBytes = builder.bytes().toBytes();
+            context.doc().add(new Field(queryBuilderField.name(), queryBuilderAsBytes, queryBuilderField.fieldType()));
+        }
+
+        Query query = toQuery(queryShardContext, mapUnmappedFieldAsString, queryBuilder);
         ExtractQueryTermsService.extractQueryTerms(query, context.doc(), queryTermsField.name(), unknownQueryField.name(), queryTermsField.fieldType());
         return null;
     }

+    public static Query parseQuery(QueryShardContext context, boolean mapUnmappedFieldsAsString, XContentParser parser) throws IOException {
+        return toQuery(context, mapUnmappedFieldsAsString, parseQueryBuilder(context.parseContext(), parser));
+    }
+
+    static Query toQuery(QueryShardContext context, boolean mapUnmappedFieldsAsString, QueryBuilder queryBuilder) throws IOException {
+        context.reset();
+        // This means that fields in the query need to exist in the mapping prior to registering this query.
+        // This is required because if a field doesn't exist, the query assumes defaults, which may be undesired.
+        //
+        // Even worse, if fields mentioned in percolator queries are added to the mapping after the queries have been
+        // registered, then the percolator queries no longer work as expected.
+        //
+        // Query parsing can't introduce new fields in mappings (which happens when registering a percolator query),
+        // because field types can't be inferred from queries (as they can from documents), so the best option here is to
+        // disallow the usage of unmapped fields in percolator queries to avoid unexpected behaviour.
+        //
+        // If index.percolator.map_unmapped_fields_as_string is set to true, the query can contain unmapped fields, which
+        // will be mapped as analyzed strings.
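+        //
+        // For example (hypothetical field name), registering a percolator query such as
+        //     {"term": {"some_unmapped_field": "value"}}
+        // is rejected at registration time if "some_unmapped_field" has no mapping, unless
+        // index.percolator.map_unmapped_fields_as_string is enabled, in which case the field is
+        // parsed as if it were an analyzed string field.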
+        context.setAllowUnmappedFields(false);
+        context.setMapUnmappedFieldAsString(mapUnmappedFieldsAsString);
+        context.parseFieldMatcher(context.getIndexSettings().getParseFieldMatcher());
+        try {
+            return queryBuilder.toQuery(context);
+        } finally {
+            context.reset();
+        }
+    }
+
+    static QueryBuilder parseQueryBuilder(QueryParseContext context, XContentParser parser) {
+        context.reset(parser);
+        try {
+            return context.parseInnerQueryBuilder();
+        } catch (IOException e) {
+            throw new ParsingException(parser.getTokenLocation(), "Failed to parse", e);
+        } finally {
+            context.reset(null);
+        }
+    }
+
     @Override
     public Iterator<Mapper> iterator() {
-        return Arrays.asList(queryTermsField, unknownQueryField).iterator();
+        return Arrays.asList(queryTermsField, unknownQueryField, queryBuilderField).iterator();
     }

     @Override
diff --git a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorHighlightSubFetchPhase.java b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorHighlightSubFetchPhase.java
new file mode 100644
index 00000000000..c1f9720b53b
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorHighlightSubFetchPhase.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.percolator;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.ReaderUtil;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.BoostQuery;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.index.percolator.PercolatorFieldMapper;
+import org.elasticsearch.index.percolator.PercolatorQueryCache;
+import org.elasticsearch.index.query.ParsedQuery;
+import org.elasticsearch.index.query.PercolatorQuery;
+import org.elasticsearch.search.SearchParseElement;
+import org.elasticsearch.search.fetch.FetchSubPhase;
+import org.elasticsearch.search.highlight.HighlightPhase;
+import org.elasticsearch.search.highlight.SearchContextHighlight;
+import org.elasticsearch.search.internal.InternalSearchHit;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.internal.SubSearchContext;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+// Highlighting in the case of the percolator query is a bit different, because the PercolatorQuery itself doesn't get
+// highlighted; instead, the document source carried by the PercolatorQuery is highlighted against each hit of type
+// '.percolator' (i.e. against each matching percolator query).
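+// In practice this means that, for each matching hit, the percolator query is looked up in the
+// PercolatorQueryCache and executed as the highlight query against the in-memory index that holds
+// the percolated document, so the returned snippets highlight the document being percolated.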
+public class PercolatorHighlightSubFetchPhase implements FetchSubPhase { + + private final HighlightPhase highlightPhase; + + @Inject + public PercolatorHighlightSubFetchPhase(HighlightPhase highlightPhase) { + this.highlightPhase = highlightPhase; + } + + @Override + public boolean hitsExecutionNeeded(SearchContext context) { + return context.highlight() != null && locatePercolatorQuery(context.query()) != null; + } + + @Override + public void hitsExecute(SearchContext context, InternalSearchHit[] hits) { + PercolatorQuery percolatorQuery = locatePercolatorQuery(context.query()); + if (percolatorQuery == null) { + // shouldn't happen as we checked for the existence of a percolator query in hitsExecutionNeeded(...) + throw new IllegalStateException("couldn't locate percolator query"); + } + + List ctxs = context.searcher().getIndexReader().leaves(); + PercolatorQueryCache queriesRegistry = context.percolatorQueryCache(); + IndexSearcher percolatorIndexSearcher = percolatorQuery.getPercolatorIndexSearcher(); + + LeafReaderContext percolatorLeafReaderContext = percolatorIndexSearcher.getIndexReader().leaves().get(0); + FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext(); + SubSearchContext subSearchContext = + createSubSearchContext(context, percolatorLeafReaderContext, percolatorQuery.getDocumentSource()); + + for (InternalSearchHit hit : hits) { + if (PercolatorFieldMapper.TYPE_NAME.equals(hit.getType())) { + LeafReaderContext ctx = ctxs.get(ReaderUtil.subIndex(hit.docId(), ctxs)); + Query query = queriesRegistry.getQueries(ctx).getQuery(hit.docId() - ctx.docBase); + subSearchContext.parsedQuery(new ParsedQuery(query)); + hitContext.reset( + new InternalSearchHit(0, "unknown", new Text(percolatorQuery.getDocumentType()), Collections.emptyMap()), + percolatorLeafReaderContext, 0, percolatorIndexSearcher + ); + hitContext.cache().clear(); + highlightPhase.hitExecute(subSearchContext, hitContext); + hit.highlightFields().putAll(hitContext.hit().getHighlightFields()); + } + } + + } + + @Override + public Map parseElements() { + return Collections.emptyMap(); + } + + @Override + public boolean hitExecutionNeeded(SearchContext context) { + return false; + } + + @Override + public void hitExecute(SearchContext context, HitContext hitContext) { + } + + static PercolatorQuery locatePercolatorQuery(Query query) { + if (query instanceof PercolatorQuery) { + return (PercolatorQuery) query; + } else if (query instanceof BooleanQuery) { + for (BooleanClause clause : ((BooleanQuery) query).clauses()) { + PercolatorQuery result = locatePercolatorQuery(clause.getQuery()); + if (result != null) { + return result; + } + } + } else if (query instanceof ConstantScoreQuery) { + return locatePercolatorQuery(((ConstantScoreQuery) query).getQuery()); + } else if (query instanceof BoostQuery) { + return locatePercolatorQuery(((BoostQuery) query).getQuery()); + } + + return null; + } + + private SubSearchContext createSubSearchContext(SearchContext context, LeafReaderContext leafReaderContext, BytesReference source) { + SubSearchContext subSearchContext = new SubSearchContext(context); + subSearchContext.highlight(new SearchContextHighlight(context.highlight().fields())); + // Enforce highlighting by source, because MemoryIndex doesn't support stored fields. 
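+        // (MemoryIndex only builds an in-memory inverted index of the percolated document and stores no
+        // fields, so the highlighter has to read the text to highlight from the document _source instead.)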
+ subSearchContext.highlight().globalForceSource(true); + subSearchContext.lookup().source().setSegmentAndDocument(leafReaderContext, 0); + subSearchContext.lookup().source().setSource(source); + return subSearchContext; + } +} diff --git a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java deleted file mode 100644 index 67ba0aaf1d2..00000000000 --- a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java +++ /dev/null @@ -1,236 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.percolator; - -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.metrics.CounterMetric; -import org.elasticsearch.common.metrics.MeanMetric; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.fielddata.IndexFieldDataService; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.mapper.internal.TypeFieldMapper; -import org.elasticsearch.index.mapper.internal.UidFieldMapper; -import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.index.shard.AbstractIndexShardComponent; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.percolator.PercolatorService; - -import java.io.Closeable; -import java.io.IOException; -import java.util.Map; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; - -/** - * Each shard will have a percolator registry even if there isn't a {@link PercolatorService#TYPE_NAME} document type in the index. - * For shards with indices that have no {@link PercolatorService#TYPE_NAME} document type, this will hold no percolate queries. - *

- * Once a document type has been created, the real-time percolator will start to listen to write events and update the - * this registry with queries in real time. - */ -public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent implements Closeable { - - public final static Setting INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING = Setting.boolSetting("index.percolator.map_unmapped_fields_as_string", false, false, Setting.Scope.INDEX); - - private final ConcurrentMap percolateQueries = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(); - private final QueryShardContext queryShardContext; - private boolean mapUnmappedFieldsAsString; - private final MeanMetric percolateMetric = new MeanMetric(); - private final CounterMetric currentMetric = new CounterMetric(); - private final CounterMetric numberOfQueries = new CounterMetric(); - - public PercolatorQueriesRegistry(ShardId shardId, IndexSettings indexSettings, QueryShardContext queryShardContext) { - super(shardId, indexSettings); - this.queryShardContext = queryShardContext; - this.mapUnmappedFieldsAsString = indexSettings.getValue(INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING); - } - - public ConcurrentMap getPercolateQueries() { - return percolateQueries; - } - - @Override - public void close() { - clear(); - } - - public void clear() { - percolateQueries.clear(); - } - - - public void addPercolateQuery(String idAsString, BytesReference source) { - Query newquery = parsePercolatorDocument(idAsString, source); - BytesRef id = new BytesRef(idAsString); - percolateQueries.put(id, newquery); - numberOfQueries.inc(); - - } - - public void removePercolateQuery(String idAsString) { - BytesRef id = new BytesRef(idAsString); - Query query = percolateQueries.remove(id); - if (query != null) { - numberOfQueries.dec(); - } - } - - public Query parsePercolatorDocument(String id, BytesReference source) { - try (XContentParser sourceParser = XContentHelper.createParser(source)) { - String currentFieldName = null; - XContentParser.Token token = sourceParser.nextToken(); // move the START_OBJECT - if (token != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchException("failed to parse query [" + id + "], not starting with OBJECT"); - } - while ((token = sourceParser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = sourceParser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - if ("query".equals(currentFieldName)) { - return parseQuery(queryShardContext, mapUnmappedFieldsAsString, sourceParser); - } else { - sourceParser.skipChildren(); - } - } else if (token == XContentParser.Token.START_ARRAY) { - sourceParser.skipChildren(); - } - } - } catch (Exception e) { - throw new PercolatorException(shardId().getIndex(), "failed to parse query [" + id + "]", e); - } - return null; - } - - public static Query parseQuery(QueryShardContext queryShardContext, boolean mapUnmappedFieldsAsString, XContentParser parser) { - QueryShardContext context = new QueryShardContext(queryShardContext); - try { - context.reset(parser); - // This means that fields in the query need to exist in the mapping prior to registering this query - // The reason that this is required, is that if a field doesn't exist then the query assumes defaults, which may be undesired. 
- // - // Even worse when fields mentioned in percolator queries do go added to map after the queries have been registered - // then the percolator queries don't work as expected any more. - // - // Query parsing can't introduce new fields in mappings (which happens when registering a percolator query), - // because field type can't be inferred from queries (like document do) so the best option here is to disallow - // the usage of unmapped fields in percolator queries to avoid unexpected behaviour - // - // if index.percolator.map_unmapped_fields_as_string is set to true, query can contain unmapped fields which will be mapped - // as an analyzed string. - context.setAllowUnmappedFields(false); - context.setMapUnmappedFieldAsString(mapUnmappedFieldsAsString); - return context.parseInnerQuery(); - } catch (IOException e) { - throw new ParsingException(parser.getTokenLocation(), "Failed to parse", e); - } finally { - context.reset(null); - } - } - - public void loadQueries(IndexReader reader) { - logger.trace("loading percolator queries..."); - final int loadedQueries; - try { - Query query = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME)); - QueriesLoaderCollector queryCollector = new QueriesLoaderCollector(PercolatorQueriesRegistry.this, logger); - IndexSearcher indexSearcher = new IndexSearcher(reader); - indexSearcher.setQueryCache(null); - indexSearcher.search(query, queryCollector); - Map queries = queryCollector.queries(); - for (Map.Entry entry : queries.entrySet()) { - percolateQueries.put(entry.getKey(), entry.getValue()); - numberOfQueries.inc(); - } - loadedQueries = queries.size(); - } catch (Exception e) { - throw new PercolatorException(shardId.getIndex(), "failed to load queries from percolator index", e); - } - logger.debug("done loading [{}] percolator queries", loadedQueries); - } - - public boolean isPercolatorQuery(Engine.Index operation) { - if (PercolatorService.TYPE_NAME.equals(operation.type())) { - parsePercolatorDocument(operation.id(), operation.source()); - return true; - } - return false; - } - - public boolean isPercolatorQuery(Engine.Delete operation) { - return PercolatorService.TYPE_NAME.equals(operation.type()); - } - - public synchronized void updatePercolateQuery(Engine engine, String id) { - // this can be called out of order as long as for every change to a percolator document it's invoked. This will always - // fetch the latest change but might fetch the same change twice if updates / deletes happen concurrently. - try (Engine.GetResult getResult = engine.get(new Engine.Get(true, new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(PercolatorService.TYPE_NAME, id))))) { - if (getResult.exists()) { - addPercolateQuery(id, getResult.source().source); - } else { - removePercolateQuery(id); - } - } - } - - public void prePercolate() { - currentMetric.inc(); - } - - public void postPercolate(long tookInNanos) { - currentMetric.dec(); - percolateMetric.inc(tookInNanos); - } - - /** - * @return The current metrics - */ - public PercolateStats stats() { - return new PercolateStats(percolateMetric.count(), TimeUnit.NANOSECONDS.toMillis(percolateMetric.sum()), currentMetric.count(), -1, numberOfQueries.count()); - } - - // Enable when a more efficient manner is found for estimating the size of a Lucene query. 
- /*private static long computeSizeInMemory(HashedBytesRef id, Query query) { - long size = (3 * RamUsageEstimator.NUM_BYTES_INT) + RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + id.bytes.bytes.length; - size += RamEstimator.sizeOf(query); - return size; - } - - private static final class RamEstimator { - // we move this into it's own class to exclude it from the forbidden API checks - // it's fine to use here! - static long sizeOf(Query query) { - return RamUsageEstimator.sizeOf(query); - } - }*/ -} diff --git a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueryCache.java b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueryCache.java new file mode 100644 index 00000000000..78d8af06827 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueryCache.java @@ -0,0 +1,264 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.percolator; + +import com.carrotsearch.hppc.IntObjectHashMap; +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Query; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.cache.Cache; +import org.elasticsearch.common.cache.CacheBuilder; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexWarmer; +import org.elasticsearch.index.IndexWarmer.TerminationHandle; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.Engine.Searcher; +import org.elasticsearch.index.mapper.internal.SourceFieldMapper; +import org.elasticsearch.index.mapper.internal.TypeFieldMapper; +import org.elasticsearch.index.query.PercolatorQuery; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import 
org.elasticsearch.index.shard.ShardUtils;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executor;
+import java.util.function.Supplier;
+
+public final class PercolatorQueryCache extends AbstractIndexComponent
+    implements Closeable, LeafReader.CoreClosedListener, PercolatorQuery.QueryRegistry {
+
+    public final static Setting<Boolean> INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING =
+        Setting.boolSetting("index.percolator.map_unmapped_fields_as_string", false, Setting.Property.IndexScope);
+
+    public final static XContentType QUERY_BUILDER_CONTENT_TYPE = XContentType.SMILE;
+
+    private final Supplier<QueryShardContext> queryShardContextSupplier;
+    private final Cache<Object, QueriesLeaf> cache;
+    private final boolean mapUnmappedFieldsAsString;
+
+    public PercolatorQueryCache(IndexSettings indexSettings, Supplier<QueryShardContext> queryShardContextSupplier) {
+        super(indexSettings);
+        this.queryShardContextSupplier = queryShardContextSupplier;
+        cache = CacheBuilder.<Object, QueriesLeaf>builder().build();
+        this.mapUnmappedFieldsAsString = indexSettings.getValue(INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING);
+    }
+
+    @Override
+    public Leaf getQueries(LeafReaderContext ctx) {
+        QueriesLeaf percolatorQueries = cache.get(ctx.reader().getCoreCacheKey());
+        if (percolatorQueries == null) {
+            throw new IllegalStateException("queries not loaded, queries should have been preloaded during index warming...");
+        }
+        return percolatorQueries;
+    }
+
+    public IndexWarmer.Listener createListener(ThreadPool threadPool) {
+        return new IndexWarmer.Listener() {
+
+            final Executor executor = threadPool.executor(ThreadPool.Names.WARMER);
+
+            @Override
+            public TerminationHandle warmReader(IndexShard indexShard, Searcher searcher) {
+                final CountDownLatch latch = new CountDownLatch(searcher.reader().leaves().size());
+                for (final LeafReaderContext ctx : searcher.reader().leaves()) {
+                    if (cache.get(ctx.reader().getCoreCacheKey()) != null) {
+                        latch.countDown();
+                        continue;
+                    }
+                    executor.execute(() -> {
+                        try {
+                            final long start = System.nanoTime();
+                            QueriesLeaf queries = loadQueries(ctx, indexShard.indexSettings().getIndexVersionCreated());
+                            cache.put(ctx.reader().getCoreCacheKey(), queries);
+                            if (indexShard.warmerService().logger().isTraceEnabled()) {
+                                indexShard.warmerService().logger().trace(
+                                    "loading percolator queries took [{}]",
+                                    TimeValue.timeValueNanos(System.nanoTime() - start)
+                                );
+                            }
+                        } catch (Throwable t) {
+                            indexShard.warmerService().logger().warn("failed to load percolator queries", t);
+                        } finally {
+                            latch.countDown();
+                        }
+                    });
+                }
+                return () -> latch.await();
+            }
+        };
+    }
+
+    QueriesLeaf loadQueries(LeafReaderContext context, Version indexVersionCreated) throws IOException {
+        LeafReader leafReader = context.reader();
+        ShardId shardId = ShardUtils.extractShardId(leafReader);
+        if (shardId == null) {
+            throw new IllegalStateException("can't resolve shard id");
+        }
+        if (indexSettings.getIndex().equals(shardId.getIndex()) == false) {
+            // percolator cache insanity
+            String message = "Trying to load queries for index " + shardId.getIndex() + " with cache of index " + indexSettings.getIndex();
+            throw new IllegalStateException(message);
+        }
+
+        IntObjectHashMap<Query> queries = new IntObjectHashMap<>();
+        boolean legacyLoading = indexVersionCreated.before(Version.V_5_0_0_alpha1);
+        PostingsEnum postings = leafReader.postings(new Term(TypeFieldMapper.NAME, PercolatorFieldMapper.TYPE_NAME), PostingsEnum.NONE);
+        if (postings != null) {
+            if
(legacyLoading) { + LegacyQueryFieldVisitor visitor = new LegacyQueryFieldVisitor(); + for (int docId = postings.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = postings.nextDoc()) { + leafReader.document(docId, visitor); + queries.put(docId, parseLegacyPercolatorDocument(docId, visitor.source)); + visitor.source = null; // reset + } + } else { + BinaryDocValues binaryDocValues = leafReader.getBinaryDocValues(PercolatorFieldMapper.QUERY_BUILDER_FULL_FIELD_NAME); + if (binaryDocValues != null) { + for (int docId = postings.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = postings.nextDoc()) { + BytesRef queryBuilder = binaryDocValues.get(docId); + if (queryBuilder.length > 0) { + queries.put(docId, parseQueryBuilder(docId, queryBuilder)); + } + } + } + } + } + leafReader.addCoreClosedListener(this); + return new QueriesLeaf(shardId, queries); + } + + private Query parseQueryBuilder(int docId, BytesRef queryBuilder) { + XContent xContent = QUERY_BUILDER_CONTENT_TYPE.xContent(); + try (XContentParser sourceParser = xContent.createParser(queryBuilder.bytes, queryBuilder.offset, queryBuilder.length)) { + QueryShardContext context = queryShardContextSupplier.get(); + return PercolatorFieldMapper.parseQuery(context, mapUnmappedFieldsAsString, sourceParser); + } catch (IOException e) { + throw new PercolatorException(index(), "failed to parse query builder for document [" + docId + "]", e); + } + } + + private Query parseLegacyPercolatorDocument(int docId, BytesReference source) { + try (XContentParser sourceParser = XContentHelper.createParser(source)) { + String currentFieldName = null; + XContentParser.Token token = sourceParser.nextToken(); // move the START_OBJECT + if (token != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchException("failed to parse query [" + docId + "], not starting with OBJECT"); + } + while ((token = sourceParser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = sourceParser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if ("query".equals(currentFieldName)) { + QueryShardContext context = queryShardContextSupplier.get(); + return PercolatorFieldMapper.parseQuery(context, mapUnmappedFieldsAsString, sourceParser); + } else { + sourceParser.skipChildren(); + } + } else if (token == XContentParser.Token.START_ARRAY) { + sourceParser.skipChildren(); + } + } + } catch (Exception e) { + throw new PercolatorException(index(), "failed to parse query [" + docId + "]", e); + } + return null; + } + + public PercolatorQueryCacheStats getStats(ShardId shardId) { + int numberOfQueries = 0; + for (QueriesLeaf queries : cache.values()) { + if (shardId.equals(queries.shardId)) { + numberOfQueries += queries.queries.size(); + } + } + return new PercolatorQueryCacheStats(numberOfQueries); + } + + @Override + public void onClose(Object cacheKey) throws IOException { + cache.invalidate(cacheKey); + } + + @Override + public void close() throws IOException { + cache.invalidateAll(); + } + + final static class LegacyQueryFieldVisitor extends StoredFieldVisitor { + + private BytesArray source; + + @Override + public void binaryField(FieldInfo fieldInfo, byte[] bytes) throws IOException { + source = new BytesArray(bytes); + } + + @Override + public Status needsField(FieldInfo fieldInfo) throws IOException { + if (source != null) { + return Status.STOP; + } + if (SourceFieldMapper.NAME.equals(fieldInfo.name)) { + return Status.YES; + } else { + return Status.NO; 
+ } + } + + } + + final static class QueriesLeaf implements Leaf { + + final ShardId shardId; + final IntObjectHashMap queries; + + QueriesLeaf(ShardId shardId, IntObjectHashMap queries) { + this.shardId = shardId; + this.queries = queries; + } + + @Override + public Query getQuery(int docId) { + return queries.get(docId); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueryCacheStats.java b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueryCacheStats.java new file mode 100644 index 00000000000..a8e3b7f4799 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/percolator/PercolatorQueryCacheStats.java @@ -0,0 +1,90 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.percolator; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentBuilderString; + +import java.io.IOException; + +/** + * Exposes percolator query cache statistics. + */ +public class PercolatorQueryCacheStats implements Streamable, ToXContent { + + private long numQueries; + + /** + * Noop constructor for serialization purposes. + */ + public PercolatorQueryCacheStats() { + } + + PercolatorQueryCacheStats(long numQueries) { + this.numQueries = numQueries; + } + + /** + * @return The total number of loaded percolate queries. 
+ */ + public long getNumQueries() { + return numQueries; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.PERCOLATOR); + builder.field(Fields.QUERIES, getNumQueries()); + builder.endObject(); + return builder; + } + + public void add(PercolatorQueryCacheStats percolate) { + if (percolate == null) { + return; + } + + numQueries += percolate.getNumQueries(); + } + + static final class Fields { + static final XContentBuilderString PERCOLATOR = new XContentBuilderString("percolator"); + static final XContentBuilderString QUERIES = new XContentBuilderString("num_queries"); + } + + public static PercolatorQueryCacheStats readPercolateStats(StreamInput in) throws IOException { + PercolatorQueryCacheStats stats = new PercolatorQueryCacheStats(); + stats.readFrom(in); + return stats; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + numQueries = in.readVLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(numQueries); + } +} diff --git a/core/src/main/java/org/elasticsearch/index/percolator/QueriesLoaderCollector.java b/core/src/main/java/org/elasticsearch/index/percolator/QueriesLoaderCollector.java deleted file mode 100644 index 1bea43e4ea1..00000000000 --- a/core/src/main/java/org/elasticsearch/index/percolator/QueriesLoaderCollector.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.index.percolator; - -import org.apache.lucene.index.LeafReader; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.SimpleCollector; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.index.fielddata.IndexFieldData; -import org.elasticsearch.index.fielddata.IndexFieldDataService; -import org.elasticsearch.index.fielddata.SortedBinaryDocValues; -import org.elasticsearch.index.fieldvisitor.FieldsVisitor; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.mapper.internal.UidFieldMapper; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -/** - */ -final class QueriesLoaderCollector extends SimpleCollector { - - private final Map queries = new HashMap<>(); - private final FieldsVisitor fieldsVisitor = new FieldsVisitor(true); - private final PercolatorQueriesRegistry percolator; - private final ESLogger logger; - - private LeafReader reader; - - QueriesLoaderCollector(PercolatorQueriesRegistry percolator, ESLogger logger) { - this.percolator = percolator; - this.logger = logger; - } - - public Map queries() { - return this.queries; - } - - @Override - public void collect(int doc) throws IOException { - fieldsVisitor.reset(); - reader.document(doc, fieldsVisitor); - final Uid uid = fieldsVisitor.uid(); - - try { - // id is only used for logging, if we fail we log the id in the catch statement - final Query parseQuery = percolator.parsePercolatorDocument(null, fieldsVisitor.source()); - if (parseQuery != null) { - queries.put(new BytesRef(uid.id()), parseQuery); - } else { - logger.warn("failed to add query [{}] - parser returned null", uid); - } - - } catch (Exception e) { - logger.warn("failed to add query [{}]", e, uid); - } - } - - @Override - protected void doSetNextReader(LeafReaderContext context) throws IOException { - reader = context.reader(); - } - - @Override - public void setScorer(Scorer scorer) throws IOException { - } - - @Override - public boolean needsScores() { - return false; - } -} diff --git a/core/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java index 1ad64c42135..318a0b33805 100644 --- a/core/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java @@ -42,7 +42,7 @@ public class ConstantScoreQueryParser implements QueryParser query = null; boolean queryFound = false; String queryName = null; float boost = AbstractQueryBuilder.DEFAULT_BOOST; @@ -56,6 +56,10 @@ public class ConstantScoreQueryParser implements QueryParser return Queries.newMatchNoDocsQuery(); } - ObjectMapper objectMapper = context.getObjectMapper(fieldPattern); - if (objectMapper != null) { - // automatic make the object mapper pattern - fieldPattern = fieldPattern + ".*"; - } - - Collection fields = context.simpleMatchToIndexNames(fieldPattern); - if (fields.isEmpty()) { - // no fields exists, so we should not match anything - return Queries.newMatchNoDocsQuery(); + final Collection fields; + if (context.getObjectMapper(fieldPattern) != null) { + // the _field_names field also indexes objects, so we don't have to + // do any more 
work to support exists queries on whole objects + fields = Collections.singleton(fieldPattern); + } else { + fields = context.simpleMatchToIndexNames(fieldPattern); } BooleanQuery.Builder boolFilterBuilder = new BooleanQuery.Builder(); for (String field : fields) { - MappedFieldType fieldType = context.fieldMapper(field); - Query filter = null; - if (fieldNamesFieldType.isEnabled()) { - final String f; - if (fieldType != null) { - f = fieldType.name(); - } else { - f = field; - } - filter = fieldNamesFieldType.termQuery(f, context); - } - // if _field_names are not indexed, we need to go the slow way - if (filter == null && fieldType != null) { - filter = fieldType.rangeQuery(null, null, true, true); - } - if (filter == null) { - filter = new TermRangeQuery(field, null, null, true, true); - } + Query filter = fieldNamesFieldType.termQuery(field, context); boolFilterBuilder.add(filter, BooleanClause.Occur.SHOULD); } return new ConstantScoreQuery(boolFilterBuilder.build()); diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java index 05c2a74bb9f..2c906dc7cb1 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -250,7 +250,8 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder use prefix encoded postings format diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java index 784c924efcf..b11b57df175 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java @@ -219,18 +219,18 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilderhas_child queries. 
@@ -104,12 +105,21 @@ public class HasChildQueryParser implements QueryParser { return ScoreMode.Max; } else if ("avg".equals(scoreModeString)) { return ScoreMode.Avg; - } else if ("total".equals(scoreModeString)) { + } else if ("sum".equals(scoreModeString)) { return ScoreMode.Total; } throw new IllegalArgumentException("No score mode for child query [" + scoreModeString + "] found"); } + public static String scoreModeAsString(ScoreMode scoreMode) { + if (scoreMode == ScoreMode.Total) { + // Lucene uses 'total' but 'sum' is more consistent with other elasticsearch APIs + return "sum"; + } else { + return scoreMode.name().toLowerCase(Locale.ROOT); + } + } + @Override public HasChildQueryBuilder getBuilderPrototype() { return HasChildQueryBuilder.PROTOTYPE; diff --git a/core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index 596c2499211..bd5f348db33 100644 --- a/core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -121,7 +121,7 @@ public class NestedQueryBuilder extends AbstractQueryBuilder query.toXContent(builder, params); builder.field(NestedQueryParser.PATH_FIELD.getPreferredName(), path); if (scoreMode != null) { - builder.field(NestedQueryParser.SCORE_MODE_FIELD.getPreferredName(), scoreMode.name().toLowerCase(Locale.ROOT)); + builder.field(NestedQueryParser.SCORE_MODE_FIELD.getPreferredName(), HasChildQueryParser.scoreModeAsString(scoreMode)); } printBoostAndQueryName(builder); if (queryInnerHits != null) { diff --git a/core/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java index ba5d7c2447e..218919f7ed2 100644 --- a/core/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java @@ -68,20 +68,7 @@ public class NestedQueryParser implements QueryParser { } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { boost = parser.floatValue(); } else if (parseContext.parseFieldMatcher().match(currentFieldName, SCORE_MODE_FIELD)) { - String sScoreMode = parser.text(); - if ("avg".equals(sScoreMode)) { - scoreMode = ScoreMode.Avg; - } else if ("min".equals(sScoreMode)) { - scoreMode = ScoreMode.Min; - } else if ("max".equals(sScoreMode)) { - scoreMode = ScoreMode.Max; - } else if ("total".equals(sScoreMode) || "sum".equals(sScoreMode)) { - scoreMode = ScoreMode.Total; - } else if ("none".equals(sScoreMode)) { - scoreMode = ScoreMode.None; - } else { - throw new ParsingException(parser.getTokenLocation(), "illegal score_mode for nested query [" + sScoreMode + "]"); - } + scoreMode = HasChildQueryParser.parseScoreMode(parser.text()); } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { queryName = parser.text(); } else { diff --git a/core/src/main/java/org/elasticsearch/index/query/ParentIdQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/ParentIdQueryBuilder.java index f9bd7623f35..4a2efa95c9a 100644 --- a/core/src/main/java/org/elasticsearch/index/query/ParentIdQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/ParentIdQueryBuilder.java @@ -19,13 +19,18 @@ package org.elasticsearch.index.query; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanClause; 
+import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.DocValuesTermsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.internal.ParentFieldMapper; +import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import java.io.IOException; import java.util.Objects; @@ -71,7 +76,12 @@ public final class ParentIdQueryBuilder extends AbstractQueryBuilder percolatorQueries; - private Query percolateQuery; private Query queriesMetaDataQuery; private final Query percolateTypeQuery; /** - * @param percolatorIndexSearcher The index searcher on top of the in-memory index that holds the document being percolated - * @param percolatorQueries All the registered percolator queries - * @param percolateTypeQuery A query that identifies all document containing percolator queries + * @param docType The type of the document being percolated + * @param queryRegistry The registry holding all the percolator queries as Lucene queries. + * @param documentSource The source of the document being percolated + * @param percolatorIndexSearcher The index searcher on top of the in-memory index that holds the document being percolated + * @param percolateTypeQuery A query that identifies all documents containing percolator queries */ - Builder(IndexSearcher percolatorIndexSearcher, Map percolatorQueries, Query percolateTypeQuery) { - this.percolatorIndexSearcher = percolatorIndexSearcher; - this.percolatorQueries = percolatorQueries; - this.percolateTypeQuery = percolateTypeQuery; - } - - /** - * Optionally sets a query that reduces the number of queries to percolate based on custom metadata attached - on the percolator documents. 
- */ - void setPercolateQuery(Query percolateQuery) { - this.percolateQuery = percolateQuery; + public Builder(String docType, QueryRegistry queryRegistry, BytesReference documentSource, IndexSearcher percolatorIndexSearcher, + Query percolateTypeQuery) { + this.docType = Objects.requireNonNull(docType); + this.documentSource = Objects.requireNonNull(documentSource); + this.percolatorIndexSearcher = Objects.requireNonNull(percolatorIndexSearcher); + this.queryRegistry = Objects.requireNonNull(queryRegistry); + this.percolateTypeQuery = Objects.requireNonNull(percolateTypeQuery); } /** @@ -85,39 +81,43 @@ final class PercolatorQuery extends Query { * @param extractedTermsFieldName The name of the field to get the extracted terms from * @param unknownQueryFieldname The field used to mark documents whose queries couldn't all get extracted */ - void extractQueryTermsQuery(String extractedTermsFieldName, String unknownQueryFieldname) throws IOException { - this.queriesMetaDataQuery = ExtractQueryTermsService.createQueryTermsQuery(percolatorIndexSearcher.getIndexReader(), extractedTermsFieldName, unknownQueryFieldname); + public void extractQueryTermsQuery(String extractedTermsFieldName, String unknownQueryFieldname) throws IOException { + this.queriesMetaDataQuery = ExtractQueryTermsService.createQueryTermsQuery( + percolatorIndexSearcher.getIndexReader(), extractedTermsFieldName, unknownQueryFieldname + ); } - PercolatorQuery build() { + public PercolatorQuery build() { BooleanQuery.Builder builder = new BooleanQuery.Builder(); builder.add(percolateTypeQuery, FILTER); if (queriesMetaDataQuery != null) { builder.add(queriesMetaDataQuery, FILTER); } - if (percolateQuery != null){ - builder.add(percolateQuery, MUST); - } - return new PercolatorQuery(builder.build(), percolatorIndexSearcher, percolatorQueries); + return new PercolatorQuery(docType, queryRegistry, documentSource, builder.build(), percolatorIndexSearcher); } } + private final String documentType; + private final QueryRegistry queryRegistry; + private final BytesReference documentSource; private final Query percolatorQueriesQuery; private final IndexSearcher percolatorIndexSearcher; - private final Map percolatorQueries; - private PercolatorQuery(Query percolatorQueriesQuery, IndexSearcher percolatorIndexSearcher, Map percolatorQueries) { + private PercolatorQuery(String documentType, QueryRegistry queryRegistry, BytesReference documentSource, + Query percolatorQueriesQuery, IndexSearcher percolatorIndexSearcher) { + this.documentType = documentType; + this.documentSource = documentSource; this.percolatorQueriesQuery = percolatorQueriesQuery; + this.queryRegistry = queryRegistry; this.percolatorIndexSearcher = percolatorIndexSearcher; - this.percolatorQueries = percolatorQueries; } @Override public Query rewrite(IndexReader reader) throws IOException { Query rewritten = percolatorQueriesQuery.rewrite(reader); if (rewritten != percolatorQueriesQuery) { - return new PercolatorQuery(rewritten, percolatorIndexSearcher, percolatorQueries); + return new PercolatorQuery(documentType, queryRegistry, documentSource, rewritten, percolatorIndexSearcher); } else { return this; } @@ -160,7 +160,7 @@ final class PercolatorQuery extends Query { return null; } - final LeafReader leafReader = leafReaderContext.reader(); + final QueryRegistry.Leaf percolatorQueries = queryRegistry.getQueries(leafReaderContext); return new Scorer(this) { @Override @@ -173,7 +173,7 @@ final class PercolatorQuery extends Query { return new 
TwoPhaseIterator(approximation.iterator()) { @Override public boolean matches() throws IOException { - return matchDocId(approximation.docID(), leafReader); + return matchDocId(approximation.docID()); } @Override @@ -198,27 +198,30 @@ final class PercolatorQuery extends Query { return approximation.docID(); } - boolean matchDocId(int docId, LeafReader leafReader) throws IOException { - SingleFieldsVisitor singleFieldsVisitor = new SingleFieldsVisitor(UidFieldMapper.NAME); - leafReader.document(docId, singleFieldsVisitor); - BytesRef percolatorQueryId = new BytesRef(singleFieldsVisitor.uid().id()); - return matchQuery(percolatorQueryId); + boolean matchDocId(int docId) throws IOException { + Query query = percolatorQueries.getQuery(docId); + if (query != null) { + return Lucene.exists(percolatorIndexSearcher, query); + } else { + return false; + } } }; } }; } - boolean matchQuery(BytesRef percolatorQueryId) throws IOException { - Query percolatorQuery = percolatorQueries.get(percolatorQueryId); - if (percolatorQuery != null) { - return Lucene.exists(percolatorIndexSearcher, percolatorQuery); - } else { - return false; - } + public IndexSearcher getPercolatorIndexSearcher() { + return percolatorIndexSearcher; } - private final Object instance = new Object(); + public String getDocumentType() { + return documentType; + } + + public BytesReference getDocumentSource() { + return documentSource; + } @Override public boolean equals(Object o) { @@ -228,19 +231,46 @@ final class PercolatorQuery extends Query { PercolatorQuery that = (PercolatorQuery) o; - return instance.equals(that.instance); + if (!documentType.equals(that.documentType)) return false; + return documentSource.equals(that.documentSource); } @Override public int hashCode() { int result = super.hashCode(); - result = 31 * result + instance.hashCode(); + result = 31 * result + documentType.hashCode(); + result = 31 * result + documentSource.hashCode(); return result; } @Override public String toString(String s) { - return "PercolatorQuery{inner={" + percolatorQueriesQuery.toString(s) + "}}"; + return "PercolatorQuery{document_type={" + documentType + "},document_source={" + documentSource.toUtf8() + + "},inner={" + percolatorQueriesQuery.toString(s) + "}}"; } + + @Override + public long ramBytesUsed() { + long sizeInBytes = 0; + if (documentSource.hasArray()) { + sizeInBytes += documentSource.array().length; + } else { + sizeInBytes += documentSource.length(); + } + return sizeInBytes; + } + + public interface QueryRegistry { + + Leaf getQueries(LeafReaderContext ctx); + + interface Leaf { + + Query getQuery(int docId); + + } + + } + } diff --git a/core/src/main/java/org/elasticsearch/index/query/PercolatorQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/PercolatorQueryBuilder.java new file mode 100644 index 00000000000..7ca70eef6aa --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/query/PercolatorQueryBuilder.java @@ -0,0 +1,375 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.MultiReader; +import org.apache.lucene.index.SlowCompositeReaderWrapper; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.memory.MemoryIndex; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.Weight; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.Version; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.DocumentMapperForType; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.internal.TypeFieldMapper; +import org.elasticsearch.index.mapper.internal.UidFieldMapper; +import org.elasticsearch.index.percolator.PercolatorFieldMapper; +import org.elasticsearch.index.percolator.PercolatorQueryCache; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.index.mapper.SourceToParse.source; + +public class PercolatorQueryBuilder extends AbstractQueryBuilder { + + public static final String NAME = "percolator"; + static final PercolatorQueryBuilder PROTO = new PercolatorQueryBuilder(null, null, null, null, null, null, null, null); + + private final String documentType; + private final BytesReference document; + + private final String indexedDocumentIndex; + private final String indexedDocumentType; + private final String indexedDocumentId; + private final String indexedDocumentRouting; + private final String indexedDocumentPreference; + private final Long indexedDocumentVersion; + + public PercolatorQueryBuilder(String documentType, BytesReference document) { + if (documentType == null) { + throw new IllegalArgumentException("[document_type] is a required argument"); + } + if (document == null) { + throw new 
IllegalArgumentException("[document] is a required argument"); + } + this.documentType = documentType; + this.document = document; + indexedDocumentIndex = null; + indexedDocumentType = null; + indexedDocumentId = null; + indexedDocumentRouting = null; + indexedDocumentPreference = null; + indexedDocumentVersion = null; + } + + public PercolatorQueryBuilder(String documentType, String indexedDocumentIndex, String indexedDocumentType, + String indexedDocumentId, String indexedDocumentRouting, String indexedDocumentPreference, + Long indexedDocumentVersion) { + if (documentType == null) { + throw new IllegalArgumentException("[document_type] is a required argument"); + } + if (indexedDocumentIndex == null) { + throw new IllegalArgumentException("[index] is a required argument"); + } + if (indexedDocumentType == null) { + throw new IllegalArgumentException("[type] is a required argument"); + } + if (indexedDocumentId == null) { + throw new IllegalArgumentException("[id] is a required argument"); + } + this.documentType = documentType; + this.indexedDocumentIndex = indexedDocumentIndex; + this.indexedDocumentType = indexedDocumentType; + this.indexedDocumentId = indexedDocumentId; + this.indexedDocumentRouting = indexedDocumentRouting; + this.indexedDocumentPreference = indexedDocumentPreference; + this.indexedDocumentVersion = indexedDocumentVersion; + this.document = null; + } + + private PercolatorQueryBuilder(String documentType, BytesReference document, String indexedDocumentIndex, String indexedDocumentType, + String indexedDocumentId, String indexedDocumentRouting, String indexedDocumentPreference, + Long indexedDocumentVersion) { + this.documentType = documentType; + this.document = document; + this.indexedDocumentIndex = indexedDocumentIndex; + this.indexedDocumentType = indexedDocumentType; + this.indexedDocumentId = indexedDocumentId; + this.indexedDocumentRouting = indexedDocumentRouting; + this.indexedDocumentPreference = indexedDocumentPreference; + this.indexedDocumentVersion = indexedDocumentVersion; + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(NAME); + builder.field(PercolatorQueryParser.DOCUMENT_TYPE_FIELD.getPreferredName(), documentType); + if (document != null) { + XContentType contentType = XContentFactory.xContentType(document); + if (contentType == builder.contentType()) { + builder.rawField(PercolatorQueryParser.DOCUMENT_FIELD.getPreferredName(), document); + } else { + XContentParser parser = XContentFactory.xContent(contentType).createParser(document); + parser.nextToken(); + builder.field(PercolatorQueryParser.DOCUMENT_FIELD.getPreferredName()); + builder.copyCurrentStructure(parser); + } + } + if (indexedDocumentIndex != null || indexedDocumentType != null || indexedDocumentId != null) { + if (indexedDocumentIndex != null) { + builder.field(PercolatorQueryParser.INDEXED_DOCUMENT_FIELD_INDEX.getPreferredName(), indexedDocumentIndex); + } + if (indexedDocumentType != null) { + builder.field(PercolatorQueryParser.INDEXED_DOCUMENT_FIELD_TYPE.getPreferredName(), indexedDocumentType); + } + if (indexedDocumentId != null) { + builder.field(PercolatorQueryParser.INDEXED_DOCUMENT_FIELD_ID.getPreferredName(), indexedDocumentId); + } + if (indexedDocumentRouting != null) { + builder.field(PercolatorQueryParser.INDEXED_DOCUMENT_FIELD_ROUTING.getPreferredName(), indexedDocumentRouting); + } + if (indexedDocumentPreference != null) { + 
builder.field(PercolatorQueryParser.INDEXED_DOCUMENT_FIELD_PREFERENCE.getPreferredName(), indexedDocumentPreference); + } + if (indexedDocumentVersion != null) { + builder.field(PercolatorQueryParser.INDEXED_DOCUMENT_FIELD_VERSION.getPreferredName(), indexedDocumentVersion); + } + } + printBoostAndQueryName(builder); + builder.endObject(); + } + + @Override + protected PercolatorQueryBuilder doReadFrom(StreamInput in) throws IOException { + String docType = in.readString(); + String documentIndex = in.readOptionalString(); + String documentType = in.readOptionalString(); + String documentId = in.readOptionalString(); + String documentRouting = in.readOptionalString(); + String documentPreference = in.readOptionalString(); + Long documentVersion = null; + if (in.readBoolean()) { + documentVersion = in.readVLong(); + } + BytesReference documentSource = null; + if (in.readBoolean()) { + documentSource = in.readBytesReference(); + } + return new PercolatorQueryBuilder(docType, documentSource, documentIndex, documentType, documentId, + documentRouting, documentPreference, documentVersion); + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + out.writeString(documentType); + out.writeOptionalString(indexedDocumentIndex); + out.writeOptionalString(indexedDocumentType); + out.writeOptionalString(indexedDocumentId); + out.writeOptionalString(indexedDocumentRouting); + out.writeOptionalString(indexedDocumentPreference); + if (indexedDocumentVersion != null) { + out.writeBoolean(true); + out.writeVLong(indexedDocumentVersion); + } else { + out.writeBoolean(false); + } + if (document != null) { + out.writeBoolean(true); + out.writeBytesReference(document); + } else { + out.writeBoolean(false); + } + } + + @Override + protected boolean doEquals(PercolatorQueryBuilder other) { + return Objects.equals(documentType, other.documentType) + && Objects.equals(document, other.document) + && Objects.equals(indexedDocumentIndex, other.indexedDocumentIndex) + && Objects.equals(indexedDocumentType, other.indexedDocumentType) + && Objects.equals(indexedDocumentId, other.indexedDocumentId); + } + + @Override + protected int doHashCode() { + return Objects.hash(documentType, document, indexedDocumentIndex, indexedDocumentType, indexedDocumentId); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException { + if (document != null) { + return this; + } + + GetRequest getRequest = new GetRequest(indexedDocumentIndex, indexedDocumentType, indexedDocumentId); + getRequest.preference("_local"); + getRequest.routing(indexedDocumentRouting); + getRequest.preference(indexedDocumentPreference); + if (indexedDocumentVersion != null) { + getRequest.version(indexedDocumentVersion); + } + GetResponse getResponse = queryShardContext.getClient().get(getRequest).actionGet(); + if (getResponse.isExists() == false) { + throw new ResourceNotFoundException( + "indexed document [{}/{}/{}] couldn't be found", indexedDocumentIndex, indexedDocumentType, indexedDocumentId + ); + } + return new PercolatorQueryBuilder(documentType, getResponse.getSourceAsBytesRef()); + } + + @Override + protected Query doToQuery(QueryShardContext context) throws IOException { + if (indexedDocumentIndex != null || indexedDocumentType != null || indexedDocumentId != null) { + throw new IllegalStateException("query builder must be rewritten first"); + } + + if (document == null) { + throw new 
IllegalStateException("nothing to percolator"); + } + + MapperService mapperService = context.getMapperService(); + DocumentMapperForType docMapperForType = mapperService.documentMapperWithAutoCreate(documentType); + DocumentMapper docMapper = docMapperForType.getDocumentMapper(); + + ParsedDocument doc = docMapper.parse(source(document) + .index(context.index().getName()) + .id("_temp_id") + .type(documentType)); + + Analyzer defaultAnalyzer = context.getAnalysisService().defaultIndexAnalyzer(); + final IndexSearcher docSearcher; + if (doc.docs().size() > 1) { + assert docMapper.hasNestedObjects(); + docSearcher = createMultiDocumentSearcher(docMapper, defaultAnalyzer, doc); + } else { + // TODO: we may want to bring to MemoryIndex thread local cache back... + // but I'm unsure about the real benefits. + MemoryIndex memoryIndex = new MemoryIndex(true); + indexDoc(docMapper, defaultAnalyzer, doc.rootDoc(), memoryIndex); + docSearcher = memoryIndex.createSearcher(); + docSearcher.setQueryCache(null); + } + + PercolatorQueryCache registry = context.getPercolatorQueryCache(); + if (registry == null) { + throw new QueryShardException(context, "no percolator query registry"); + } + + Query percolateTypeQuery = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorFieldMapper.TYPE_NAME)); + PercolatorQuery.Builder builder = new PercolatorQuery.Builder( + documentType, registry, document, docSearcher, percolateTypeQuery + ); + Settings indexSettings = registry.getIndexSettings().getSettings(); + if (indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null).onOrAfter(Version.V_5_0_0_alpha1)) { + builder.extractQueryTermsQuery( + PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME, PercolatorFieldMapper.UNKNOWN_QUERY_FULL_FIELD_NAME + ); + } + return builder.build(); + } + + public String getDocumentType() { + return documentType; + } + + public BytesReference getDocument() { + return document; + } + + private IndexSearcher createMultiDocumentSearcher(DocumentMapper docMapper, Analyzer defaultAnalyzer, ParsedDocument doc) { + IndexReader[] memoryIndices = new IndexReader[doc.docs().size()]; + List docs = doc.docs(); + int rootDocIndex = docs.size() - 1; + assert rootDocIndex > 0; + for (int i = 0; i < docs.size(); i++) { + ParseContext.Document d = docs.get(i); + MemoryIndex memoryIndex = new MemoryIndex(true); + indexDoc(docMapper, defaultAnalyzer, d, memoryIndex); + memoryIndices[i] = memoryIndex.createSearcher().getIndexReader(); + } + try { + MultiReader mReader = new MultiReader(memoryIndices, true); + LeafReader slowReader = SlowCompositeReaderWrapper.wrap(mReader); + final IndexSearcher slowSearcher = new IndexSearcher(slowReader) { + + @Override + public Weight createNormalizedWeight(Query query, boolean needsScores) throws IOException { + BooleanQuery.Builder bq = new BooleanQuery.Builder(); + bq.add(query, BooleanClause.Occur.MUST); + bq.add(Queries.newNestedFilter(), BooleanClause.Occur.MUST_NOT); + return super.createNormalizedWeight(bq.build(), needsScores); + } + + }; + slowSearcher.setQueryCache(null); + return slowSearcher; + } catch (IOException e) { + throw new ElasticsearchException("Failed to create index for percolator with nested document ", e); + } + } + + private void indexDoc(DocumentMapper documentMapper, Analyzer defaultAnalyzer, ParseContext.Document document, + MemoryIndex memoryIndex) { + for (IndexableField field : document.getFields()) { + if (field.fieldType().indexOptions() == IndexOptions.NONE && field.name().equals(UidFieldMapper.NAME)) { + 
continue; + } + + Analyzer analyzer = defaultAnalyzer; + if (documentMapper != null && documentMapper.mappers().getMapper(field.name()) != null) { + analyzer = documentMapper.mappers().indexAnalyzer(); + } + try { + try (TokenStream tokenStream = field.tokenStream(analyzer, null)) { + if (tokenStream != null) { + memoryIndex.addField(field.name(), tokenStream, field.boost()); + } + } + } catch (IOException e) { + throw new ElasticsearchException("Failed to create token stream", e); + } + } + } + +} diff --git a/core/src/main/java/org/elasticsearch/index/query/PercolatorQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/PercolatorQueryParser.java new file mode 100644 index 00000000000..a559db59927 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/query/PercolatorQueryParser.java @@ -0,0 +1,134 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +public class PercolatorQueryParser implements QueryParser { + + public static final ParseField DOCUMENT_FIELD = new ParseField("document"); + public static final ParseField DOCUMENT_TYPE_FIELD = new ParseField("document_type"); + public static final ParseField INDEXED_DOCUMENT_FIELD_INDEX = new ParseField("index"); + public static final ParseField INDEXED_DOCUMENT_FIELD_TYPE = new ParseField("type"); + public static final ParseField INDEXED_DOCUMENT_FIELD_ID = new ParseField("id"); + public static final ParseField INDEXED_DOCUMENT_FIELD_ROUTING = new ParseField("routing"); + public static final ParseField INDEXED_DOCUMENT_FIELD_PREFERENCE = new ParseField("preference"); + public static final ParseField INDEXED_DOCUMENT_FIELD_VERSION = new ParseField("version"); + + @Override + public String[] names() { + return new String[]{PercolatorQueryBuilder.NAME}; + } + + @Override + public PercolatorQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException { + XContentParser parser = parseContext.parser(); + float boost = AbstractQueryBuilder.DEFAULT_BOOST; + + String documentType = null; + + String indexedDocumentIndex = null; + String indexedDocumentType = null; + String indexedDocumentId = null; + String indexedDocumentRouting = null; + String indexedDocumentPreference = null; + Long indexedDocumentVersion = null; + + BytesReference source = null; + + String queryName = null; + String currentFieldName = null; + + XContentParser.Token token; + while ((token = 
parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if (parseContext.parseFieldMatcher().match(currentFieldName, DOCUMENT_FIELD)) { + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + builder.copyCurrentStructure(parser); + builder.flush(); + source = builder.bytes(); + } + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + PercolatorQueryBuilder.NAME + + "] query does not support [" + token + "]"); + } + } else if (token.isValue()) { + if (parseContext.parseFieldMatcher().match(currentFieldName, DOCUMENT_TYPE_FIELD)) { + documentType = parser.text(); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, INDEXED_DOCUMENT_FIELD_INDEX)) { + indexedDocumentIndex = parser.text(); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, INDEXED_DOCUMENT_FIELD_TYPE)) { + indexedDocumentType = parser.text(); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, INDEXED_DOCUMENT_FIELD_ID)) { + indexedDocumentId = parser.text(); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, INDEXED_DOCUMENT_FIELD_ROUTING)) { + indexedDocumentRouting = parser.text(); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, INDEXED_DOCUMENT_FIELD_PREFERENCE)) { + indexedDocumentPreference = parser.text(); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, INDEXED_DOCUMENT_FIELD_VERSION)) { + indexedDocumentVersion = parser.longValue(); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { + boost = parser.floatValue(); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { + queryName = parser.text(); + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + PercolatorQueryBuilder.NAME + + "] query does not support [" + currentFieldName + "]"); + } + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + PercolatorQueryBuilder.NAME + + "] query does not support [" + token + "]"); + } + } + + if (documentType == null) { + throw new IllegalArgumentException("[" + PercolatorQueryBuilder.NAME + "] query is missing required [" + + DOCUMENT_TYPE_FIELD.getPreferredName() + "] parameter"); + } + + PercolatorQueryBuilder queryBuilder; + if (source != null) { + queryBuilder = new PercolatorQueryBuilder(documentType, source); + } else if (indexedDocumentId != null) { + queryBuilder = new PercolatorQueryBuilder(documentType, indexedDocumentIndex, indexedDocumentType, + indexedDocumentId, indexedDocumentRouting, indexedDocumentPreference, indexedDocumentVersion); + } else { + throw new IllegalArgumentException("[" + PercolatorQueryBuilder.NAME + "] query, nothing to percolate"); + } + queryBuilder.queryName(queryName); + queryBuilder.boost(boost); + return queryBuilder; + } + + @Override + public PercolatorQueryBuilder getBuilderPrototype() { + return PercolatorQueryBuilder.PROTO; + } + +} diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java b/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java index 21c1f3ff695..f04f03fcbcd 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java @@ -832,6 +832,18 @@ public abstract class QueryBuilders { return new 
ExistsQueryBuilder(name); } + public static PercolatorQueryBuilder percolatorQuery(String documentType, BytesReference document) { + return new PercolatorQueryBuilder(documentType, document); + } + + public static PercolatorQueryBuilder percolatorQuery(String documentType, String indexedDocumentIndex, + String indexedDocumentType, String indexedDocumentId, + String indexedDocumentRouting, String indexedDocumentPreference, + Long indexedDocumentVersion) { + return new PercolatorQueryBuilder(documentType, indexedDocumentIndex, indexedDocumentType, indexedDocumentId, + indexedDocumentRouting, indexedDocumentPreference, indexedDocumentVersion); + } + private QueryBuilders() { } diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryParser.java b/core/src/main/java/org/elasticsearch/index/query/QueryParser.java index 0a3f6d6147c..1226564bb3b 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryParser.java @@ -33,7 +33,7 @@ public interface QueryParser<QB extends QueryBuilder<QB>> { String[] names(); /** - * Creates a new {@link QueryBuilder} from the query held by the {@link QueryShardContext} + * Creates a new {@link QueryBuilder} from the query held by the {@link QueryParseContext} * in {@link org.elasticsearch.common.xcontent.XContent} format * * @param parseContext diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java index e057aff06b1..11164659b3f 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.query; import org.elasticsearch.client.Client; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.fieldstats.FieldStatsProvider; import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.script.ScriptService; @@ -31,6 +32,7 @@ public class QueryRewriteContext { protected final IndexSettings indexSettings; protected final IndicesQueriesRegistry indicesQueriesRegistry; protected final QueryParseContext parseContext; + protected FieldStatsProvider fieldStatsProvider; public QueryRewriteContext(IndexSettings indexSettings, ScriptService scriptService, IndicesQueriesRegistry indicesQueriesRegistry) { this.scriptService = scriptService; @@ -39,6 +41,14 @@ public class QueryRewriteContext { this.parseContext = new QueryParseContext(indicesQueriesRegistry); } + public void setFieldStatsProvider(FieldStatsProvider fieldStatsProvider) { + this.fieldStatsProvider = fieldStatsProvider; + } + + public FieldStatsProvider getFieldStatsProvider() { + return fieldStatsProvider; + } + /** * Returns a client to fetch resources from local or remote nodes. 
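[Editor's note: a short usage sketch for the two factory methods added to QueryBuilders above, assuming the signatures shown in this hunk. The inline variant carries the document source directly; the stored-document variant is resolved by doRewrite() into an inline one before execution.]

    import org.elasticsearch.common.bytes.BytesArray;
    import org.elasticsearch.index.query.PercolatorQueryBuilder;
    import org.elasticsearch.index.query.QueryBuilders;

    public class PercolatorQueryUsageSketch {
        public static void main(String[] args) {
            // Percolate a document supplied inline as JSON source.
            PercolatorQueryBuilder inline = QueryBuilders.percolatorQuery(
                    "tweet", new BytesArray("{\"message\":\"the quick brown fox\"}"));

            // Percolate a document that is already indexed; routing, preference
            // and version are optional and may be passed as null.
            PercolatorQueryBuilder stored = QueryBuilders.percolatorQuery(
                    "tweet", "tweets", "tweet", "1", null, null, null);

            System.out.println(inline.getDocumentType() + " / " + stored.getDocumentType());
        }
    }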
*/ diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java index 6acd5272f89..63eff82ddb0 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java @@ -44,9 +44,9 @@ import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.mapper.core.TextFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; +import org.elasticsearch.index.percolator.PercolatorQueryCache; import org.elasticsearch.index.query.support.InnerHitsQueryParserHelper; import org.elasticsearch.index.query.support.NestedScope; import org.elasticsearch.index.similarity.SimilarityService; @@ -87,13 +87,15 @@ public class QueryShardContext extends QueryRewriteContext { private final Map namedQueries = new HashMap<>(); private final MapperQueryParser queryParser = new MapperQueryParser(this); + private final IndicesQueriesRegistry indicesQueriesRegistry; + private final PercolatorQueryCache percolatorQueryCache; private boolean allowUnmappedFields; private boolean mapUnmappedFieldAsString; private NestedScope nestedScope; boolean isFilter; // pkg private for testing public QueryShardContext(IndexSettings indexSettings, BitsetFilterCache bitsetFilterCache, IndexFieldDataService indexFieldDataService, MapperService mapperService, SimilarityService similarityService, ScriptService scriptService, - final IndicesQueriesRegistry indicesQueriesRegistry) { + final IndicesQueriesRegistry indicesQueriesRegistry, PercolatorQueryCache percolatorQueryCache) { super(indexSettings, scriptService, indicesQueriesRegistry); this.indexSettings = indexSettings; this.similarityService = similarityService; @@ -101,17 +103,20 @@ public class QueryShardContext extends QueryRewriteContext { this.bitsetFilterCache = bitsetFilterCache; this.indexFieldDataService = indexFieldDataService; this.allowUnmappedFields = indexSettings.isDefaultAllowUnmappedFields(); - + this.indicesQueriesRegistry = indicesQueriesRegistry; + this.percolatorQueryCache = percolatorQueryCache; + this.nestedScope = new NestedScope(); } public QueryShardContext(QueryShardContext source) { - this(source.indexSettings, source.bitsetFilterCache, source.indexFieldDataService, source.mapperService, source.similarityService, source.scriptService, source.indicesQueriesRegistry); + this(source.indexSettings, source.bitsetFilterCache, source.indexFieldDataService, source.mapperService, source.similarityService, source.scriptService, source.indicesQueriesRegistry, source.percolatorQueryCache); this.types = source.getTypes(); } + @Override public QueryShardContext clone() { - return new QueryShardContext(indexSettings, bitsetFilterCache, indexFieldDataService, mapperService, similarityService, scriptService, indicesQueriesRegistry); + return new QueryShardContext(indexSettings, bitsetFilterCache, indexFieldDataService, mapperService, similarityService, scriptService, indicesQueriesRegistry, percolatorQueryCache); } public void parseFieldMatcher(ParseFieldMatcher parseFieldMatcher) { @@ -148,6 +153,10 @@ public class QueryShardContext extends QueryRewriteContext { return mapperService; } + public PercolatorQueryCache getPercolatorQueryCache() { 
+ return percolatorQueryCache; + } + public Similarity getSearchSimilarity() { return similarityService != null ? similarityService.similarity(mapperService) : null; } diff --git a/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java index cd99bec0f74..b1132c42ea9 100644 --- a/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.fieldstats.FieldStatsProvider; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.core.DateFieldMapper; import org.joda.time.DateTimeZone; @@ -253,6 +254,36 @@ public class RangeQueryBuilder extends AbstractQueryBuilder i return NAME; } + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { + FieldStatsProvider fieldStatsProvider = queryRewriteContext.getFieldStatsProvider(); + // If the fieldStatsProvider is null we are not on the shard and cannot + // rewrite so just return without rewriting + if (fieldStatsProvider != null) { + DateMathParser dateMathParser = format == null ? null : new DateMathParser(format); + FieldStatsProvider.Relation relation = fieldStatsProvider.isFieldWithinQuery(fieldName, from, to, includeLower, includeUpper, + timeZone, dateMathParser); + switch (relation) { + case DISJOINT: + return new MatchNoneQueryBuilder(); + case WITHIN: + if (from != null || to != null) { + RangeQueryBuilder newRangeQuery = new RangeQueryBuilder(fieldName); + newRangeQuery.from(null); + newRangeQuery.to(null); + newRangeQuery.format = format; + newRangeQuery.timeZone = timeZone; + return newRangeQuery; + } else { + return this; + } + case INTERSECTS: + break; + } + } + return this; + } + @Override protected Query doToQuery(QueryShardContext context) throws IOException { Query query = null; diff --git a/core/src/main/java/org/elasticsearch/index/query/support/InnerHitsQueryParserHelper.java b/core/src/main/java/org/elasticsearch/index/query/support/InnerHitsQueryParserHelper.java index 74a905919f7..0467e459718 100644 --- a/core/src/main/java/org/elasticsearch/index/query/support/InnerHitsQueryParserHelper.java +++ b/core/src/main/java/org/elasticsearch/index/query/support/InnerHitsQueryParserHelper.java @@ -27,7 +27,7 @@ import org.elasticsearch.search.fetch.source.FetchSourceParseElement; import org.elasticsearch.search.highlight.HighlighterParseElement; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.SubSearchContext; -import org.elasticsearch.search.sort.SortParseElement; +import org.elasticsearch.search.sort.SortBuilder; import java.io.IOException; @@ -35,7 +35,6 @@ public class InnerHitsQueryParserHelper { public static final InnerHitsQueryParserHelper INSTANCE = new InnerHitsQueryParserHelper(); - private static final SortParseElement sortParseElement = new SortParseElement(); private static final FetchSourceParseElement sourceParseElement = new FetchSourceParseElement(); private static final HighlighterParseElement highlighterParseElement = new HighlighterParseElement(); private static final ScriptFieldsParseElement scriptFieldsParseElement = new 
ScriptFieldsParseElement(); @@ -54,10 +53,10 @@ public class InnerHitsQueryParserHelper { if ("name".equals(fieldName)) { innerHitName = parser.textOrNull(); } else { - parseCommonInnerHitOptions(parser, token, fieldName, subSearchContext, sortParseElement, sourceParseElement, highlighterParseElement, scriptFieldsParseElement, fieldDataFieldsParseElement); + parseCommonInnerHitOptions(parser, token, fieldName, subSearchContext, sourceParseElement, highlighterParseElement, scriptFieldsParseElement, fieldDataFieldsParseElement); } } else { - parseCommonInnerHitOptions(parser, token, fieldName, subSearchContext, sortParseElement, sourceParseElement, highlighterParseElement, scriptFieldsParseElement, fieldDataFieldsParseElement); + parseCommonInnerHitOptions(parser, token, fieldName, subSearchContext, sourceParseElement, highlighterParseElement, scriptFieldsParseElement, fieldDataFieldsParseElement); } } } catch (Exception e) { @@ -67,10 +66,10 @@ public class InnerHitsQueryParserHelper { } public static void parseCommonInnerHitOptions(XContentParser parser, XContentParser.Token token, String fieldName, SubSearchContext subSearchContext, - SortParseElement sortParseElement, FetchSourceParseElement sourceParseElement, HighlighterParseElement highlighterParseElement, + FetchSourceParseElement sourceParseElement, HighlighterParseElement highlighterParseElement, ScriptFieldsParseElement scriptFieldsParseElement, FieldDataFieldsParseElement fieldDataFieldsParseElement) throws Exception { if ("sort".equals(fieldName)) { - sortParseElement.parse(parser, subSearchContext); + SortBuilder.parseSort(parser, subSearchContext); } else if ("_source".equals(fieldName)) { sourceParseElement.parse(parser, subSearchContext); } else if (token == XContentParser.Token.START_OBJECT) { diff --git a/core/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java b/core/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java deleted file mode 100644 index 9923728e3bd..00000000000 --- a/core/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
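[Editor's note: the RangeQueryBuilder.doRewrite() hunk above prunes range queries before execution based on per-field statistics. A simplified, self-contained sketch of the three-way relation it keys off, with plain numeric min/max bounds standing in for FieldStatsProvider.isFieldWithinQuery():]

    public class RangeRewriteSketch {
        enum Relation { DISJOINT, WITHIN, INTERSECTS }

        // Compare the field's actual value range on this shard with the requested range.
        static Relation relate(long fieldMin, long fieldMax, long from, long to) {
            if (fieldMax < from || fieldMin > to) {
                return Relation.DISJOINT;   // nothing can match: rewrite to match-none
            }
            if (fieldMin >= from && fieldMax <= to) {
                return Relation.WITHIN;     // everything matches: drop the bounds
            }
            return Relation.INTERSECTS;     // keep the range query as-is
        }

        public static void main(String[] args) {
            System.out.println(relate(10, 20, 30, 40)); // DISJOINT
            System.out.println(relate(10, 20, 0, 100)); // WITHIN
            System.out.println(relate(10, 20, 15, 40)); // INTERSECTS
        }
    }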
- */ - -package org.elasticsearch.index.query.support; - -import org.apache.lucene.search.Query; -import org.apache.lucene.search.join.BitSetProducer; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.mapper.object.ObjectMapper; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.index.query.QueryShardException; -import org.elasticsearch.search.internal.SearchContext; - -import java.io.IOException; - -/** - * A helper that helps with parsing inner queries of the nested query. - * 1) Takes into account that type nested path can appear before or after the inner query - * 2) Updates the {@link NestedScope} when parsing the inner query. - */ -public class NestedInnerQueryParseSupport { - - protected final QueryShardContext shardContext; - protected final QueryParseContext parseContext; - - private BytesReference source; - private Query innerQuery; - private Query innerFilter; - protected String path; - - private boolean filterParsed = false; - private boolean queryParsed = false; - protected boolean queryFound = false; - protected boolean filterFound = false; - - protected BitSetProducer parentFilter; - protected Query childFilter; - - protected ObjectMapper nestedObjectMapper; - private ObjectMapper parentObjectMapper; - - public NestedInnerQueryParseSupport(XContentParser parser, SearchContext searchContext) { - shardContext = searchContext.getQueryShardContext(); - parseContext = shardContext.parseContext(); - shardContext.reset(parser); - - } - - public NestedInnerQueryParseSupport(QueryShardContext context) { - this.parseContext = context.parseContext(); - this.shardContext = context; - } - - public void query() throws IOException { - if (path != null) { - setPathLevel(); - try { - innerQuery = parseContext.parseInnerQueryBuilder().toQuery(this.shardContext); - } finally { - resetPathLevel(); - } - queryParsed = true; - } else { - source = XContentFactory.smileBuilder().copyCurrentStructure(parseContext.parser()).bytes(); - } - queryFound = true; - } - - public void filter() throws IOException { - if (path != null) { - setPathLevel(); - try { - innerFilter = QueryBuilder.rewriteQuery(parseContext.parseInnerQueryBuilder(), - this.shardContext).toFilter(this.shardContext); - } finally { - resetPathLevel(); - } - filterParsed = true; - } else { - source = XContentFactory.smileBuilder().copyCurrentStructure(parseContext.parser()).bytes(); - } - filterFound = true; - } - - public Query getInnerQuery() throws IOException { - if (queryParsed) { - return innerQuery; - } else { - if (path == null) { - throw new QueryShardException(shardContext, "[nested] requires 'path' field"); - } - if (!queryFound) { - throw new QueryShardException(shardContext, "[nested] requires either 'query' or 'filter' field"); - } - - XContentParser old = parseContext.parser(); - try { - XContentParser innerParser = XContentHelper.createParser(source); - parseContext.parser(innerParser); - setPathLevel(); - try { - innerQuery = parseContext.parseInnerQueryBuilder().toQuery(this.shardContext); - } finally { - resetPathLevel(); - } - queryParsed = true; - return innerQuery; - } finally { - parseContext.parser(old); - } - } 
- } - - public Query getInnerFilter() throws IOException { - if (filterParsed) { - return innerFilter; - } else { - if (path == null) { - throw new QueryShardException(shardContext, "[nested] requires 'path' field"); - } - if (!filterFound) { - throw new QueryShardException(shardContext, "[nested] requires either 'query' or 'filter' field"); - } - - setPathLevel(); - XContentParser old = parseContext.parser(); - try { - XContentParser innerParser = XContentHelper.createParser(source); - parseContext.parser(innerParser); - innerFilter = QueryBuilder.rewriteQuery(parseContext.parseInnerQueryBuilder(), - this.shardContext).toFilter(this.shardContext); - filterParsed = true; - return innerFilter; - } finally { - resetPathLevel(); - parseContext.parser(old); - } - } - } - - public void setPath(String path) { - this.path = path; - nestedObjectMapper = shardContext.getObjectMapper(path); - if (nestedObjectMapper == null) { - throw new QueryShardException(shardContext, "[nested] failed to find nested object under path [" + path + "]"); - } - if (!nestedObjectMapper.nested().isNested()) { - throw new QueryShardException(shardContext, "[nested] nested object under path [" + path + "] is not of nested type"); - } - } - - public String getPath() { - return path; - } - - public ObjectMapper getNestedObjectMapper() { - return nestedObjectMapper; - } - - public boolean queryFound() { - return queryFound; - } - - public boolean filterFound() { - return filterFound; - } - - public ObjectMapper getParentObjectMapper() { - return parentObjectMapper; - } - - private void setPathLevel() { - ObjectMapper objectMapper = shardContext.nestedScope().getObjectMapper(); - if (objectMapper == null) { - parentFilter = shardContext.bitsetFilter(Queries.newNonNestedFilter()); - } else { - parentFilter = shardContext.bitsetFilter(objectMapper.nestedTypeFilter()); - } - childFilter = nestedObjectMapper.nestedTypeFilter(); - parentObjectMapper = shardContext.nestedScope().nextLevel(nestedObjectMapper); - } - - private void resetPathLevel() { - shardContext.nestedScope().previousLevel(); - } - -} diff --git a/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java b/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java index 979bfba605f..9cd587704cb 100644 --- a/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/MatchQuery.java @@ -45,7 +45,6 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.support.QueryParsers; import java.io.IOException; -import java.util.List; public class MatchQuery { @@ -336,10 +335,10 @@ public class MatchQuery { return prefixQuery; } else if (query instanceof MultiPhraseQuery) { MultiPhraseQuery pq = (MultiPhraseQuery)query; - List terms = pq.getTermArrays(); + Term[][] terms = pq.getTermArrays(); int[] positions = pq.getPositions(); - for (int i = 0; i < terms.size(); i++) { - prefixQuery.add(terms.get(i), positions[i]); + for (int i = 0; i < terms.length; i++) { + prefixQuery.add(terms[i], positions[i]); } return prefixQuery; } else if (query instanceof TermQuery) { diff --git a/core/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java b/core/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java index c35a4cdbadb..bcabd7c5e12 100644 --- a/core/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java +++ b/core/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java @@ -51,6 +51,10 @@ public class SearchStats 
implements Streamable, ToXContent { private long scrollTimeInMillis; private long scrollCurrent; + private long suggestCount; + private long suggestTimeInMillis; + private long suggestCurrent; + Stats() { } @@ -58,7 +62,8 @@ public class SearchStats implements Streamable, ToXContent { public Stats( long queryCount, long queryTimeInMillis, long queryCurrent, long fetchCount, long fetchTimeInMillis, long fetchCurrent, - long scrollCount, long scrollTimeInMillis, long scrollCurrent + long scrollCount, long scrollTimeInMillis, long scrollCurrent, + long suggestCount, long suggestTimeInMillis, long suggestCurrent ) { this.queryCount = queryCount; this.queryTimeInMillis = queryTimeInMillis; @@ -71,13 +76,19 @@ public class SearchStats implements Streamable, ToXContent { this.scrollCount = scrollCount; this.scrollTimeInMillis = scrollTimeInMillis; this.scrollCurrent = scrollCurrent; + + this.suggestCount = suggestCount; + this.suggestTimeInMillis = suggestTimeInMillis; + this.suggestCurrent = suggestCurrent; + } public Stats(Stats stats) { this( stats.queryCount, stats.queryTimeInMillis, stats.queryCurrent, stats.fetchCount, stats.fetchTimeInMillis, stats.fetchCurrent, - stats.scrollCount, stats.scrollTimeInMillis, stats.scrollCurrent + stats.scrollCount, stats.scrollTimeInMillis, stats.scrollCurrent, + stats.suggestCount, stats.suggestTimeInMillis, stats.suggestCurrent ); } @@ -93,6 +104,10 @@ public class SearchStats implements Streamable, ToXContent { scrollCount += stats.scrollCount; scrollTimeInMillis += stats.scrollTimeInMillis; scrollCurrent += stats.scrollCurrent; + + suggestCount += stats.suggestCount; + suggestTimeInMillis += stats.suggestTimeInMillis; + suggestCurrent += stats.suggestCurrent; } public long getQueryCount() { @@ -143,6 +158,22 @@ public class SearchStats implements Streamable, ToXContent { return scrollCurrent; } + public long getSuggestCount() { + return suggestCount; + } + + public long getSuggestTimeInMillis() { + return suggestTimeInMillis; + } + + public TimeValue getSuggestTime() { + return new TimeValue(suggestTimeInMillis); + } + + public long getSuggestCurrent() { + return suggestCurrent; + } + public static Stats readStats(StreamInput in) throws IOException { Stats stats = new Stats(); stats.readFrom(in); @@ -162,6 +193,10 @@ public class SearchStats implements Streamable, ToXContent { scrollCount = in.readVLong(); scrollTimeInMillis = in.readVLong(); scrollCurrent = in.readVLong(); + + suggestCount = in.readVLong(); + suggestTimeInMillis = in.readVLong(); + suggestCurrent = in.readVLong(); } @Override @@ -177,6 +212,10 @@ public class SearchStats implements Streamable, ToXContent { out.writeVLong(scrollCount); out.writeVLong(scrollTimeInMillis); out.writeVLong(scrollCurrent); + + out.writeVLong(suggestCount); + out.writeVLong(suggestTimeInMillis); + out.writeVLong(suggestCurrent); } @Override @@ -193,6 +232,10 @@ public class SearchStats implements Streamable, ToXContent { builder.timeValueField(Fields.SCROLL_TIME_IN_MILLIS, Fields.SCROLL_TIME, scrollTimeInMillis); builder.field(Fields.SCROLL_CURRENT, scrollCurrent); + builder.field(Fields.SUGGEST_TOTAL, suggestCount); + builder.timeValueField(Fields.SUGGEST_TIME_IN_MILLIS, Fields.SUGGEST_TIME, suggestTimeInMillis); + builder.field(Fields.SUGGEST_CURRENT, suggestCurrent); + return builder; } } @@ -292,6 +335,10 @@ public class SearchStats implements Streamable, ToXContent { static final XContentBuilderString SCROLL_TIME = new XContentBuilderString("scroll_time"); static final XContentBuilderString 
SCROLL_TIME_IN_MILLIS = new XContentBuilderString("scroll_time_in_millis"); static final XContentBuilderString SCROLL_CURRENT = new XContentBuilderString("scroll_current"); + static final XContentBuilderString SUGGEST_TOTAL = new XContentBuilderString("suggest_total"); + static final XContentBuilderString SUGGEST_TIME = new XContentBuilderString("suggest_time"); + static final XContentBuilderString SUGGEST_TIME_IN_MILLIS = new XContentBuilderString("suggest_time_in_millis"); + static final XContentBuilderString SUGGEST_CURRENT = new XContentBuilderString("suggest_current"); } public static SearchStats readSearchStats(StreamInput in) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/index/search/stats/ShardSearchStats.java b/core/src/main/java/org/elasticsearch/index/search/stats/ShardSearchStats.java index 1a155d17964..748bb01bd54 100644 --- a/core/src/main/java/org/elasticsearch/index/search/stats/ShardSearchStats.java +++ b/core/src/main/java/org/elasticsearch/index/search/stats/ShardSearchStats.java @@ -23,13 +23,13 @@ import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.SearchSlowLog; import org.elasticsearch.search.internal.SearchContext; import java.util.HashMap; import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; import static java.util.Collections.emptyMap; @@ -72,64 +72,51 @@ public final class ShardSearchStats { } public void onPreQueryPhase(SearchContext searchContext) { - totalStats.queryCurrent.inc(); - if (searchContext.groupStats() != null) { - for (int i = 0; i < searchContext.groupStats().size(); i++) { - groupStats(searchContext.groupStats().get(i)).queryCurrent.inc(); + computeStats(searchContext, statsHolder -> { + if (searchContext.hasOnlySuggest()) { + statsHolder.suggestCurrent.inc(); + } else { + statsHolder.queryCurrent.inc(); } - } + }); } public void onFailedQueryPhase(SearchContext searchContext) { - totalStats.queryCurrent.dec(); - if (searchContext.groupStats() != null) { - for (int i = 0; i < searchContext.groupStats().size(); i++) { - groupStats(searchContext.groupStats().get(i)).queryCurrent.dec(); + computeStats(searchContext, statsHolder -> { + if (searchContext.hasOnlySuggest()) { + statsHolder.suggestCurrent.dec(); + } else { + statsHolder.queryCurrent.dec(); } - } + }); } public void onQueryPhase(SearchContext searchContext, long tookInNanos) { - totalStats.queryMetric.inc(tookInNanos); - totalStats.queryCurrent.dec(); - if (searchContext.groupStats() != null) { - for (int i = 0; i < searchContext.groupStats().size(); i++) { - StatsHolder statsHolder = groupStats(searchContext.groupStats().get(i)); + computeStats(searchContext, statsHolder -> { + if (searchContext.hasOnlySuggest()) { + statsHolder.suggestMetric.inc(tookInNanos); + statsHolder.suggestCurrent.dec(); + } else { statsHolder.queryMetric.inc(tookInNanos); statsHolder.queryCurrent.dec(); } - } + }); slowLogSearchService.onQueryPhase(searchContext, tookInNanos); } public void onPreFetchPhase(SearchContext searchContext) { - totalStats.fetchCurrent.inc(); - if (searchContext.groupStats() != null) { - for (int i = 0; i < searchContext.groupStats().size(); i++) { - groupStats(searchContext.groupStats().get(i)).fetchCurrent.inc(); - } - } + computeStats(searchContext, statsHolder -> 
statsHolder.fetchCurrent.inc()); } public void onFailedFetchPhase(SearchContext searchContext) { - totalStats.fetchCurrent.dec(); - if (searchContext.groupStats() != null) { - for (int i = 0; i < searchContext.groupStats().size(); i++) { - groupStats(searchContext.groupStats().get(i)).fetchCurrent.dec(); - } - } + computeStats(searchContext, statsHolder -> statsHolder.fetchCurrent.dec()); } public void onFetchPhase(SearchContext searchContext, long tookInNanos) { - totalStats.fetchMetric.inc(tookInNanos); - totalStats.fetchCurrent.dec(); - if (searchContext.groupStats() != null) { - for (int i = 0; i < searchContext.groupStats().size(); i++) { - StatsHolder statsHolder = groupStats(searchContext.groupStats().get(i)); - statsHolder.fetchMetric.inc(tookInNanos); - statsHolder.fetchCurrent.dec(); - } - } + computeStats(searchContext, statsHolder -> { + statsHolder.fetchMetric.inc(tookInNanos); + statsHolder.fetchCurrent.dec(); + }); slowLogSearchService.onFetchPhase(searchContext, tookInNanos); } @@ -149,6 +136,15 @@ public final class ShardSearchStats { } } + private void computeStats(SearchContext searchContext, Consumer consumer) { + consumer.accept(totalStats); + if (searchContext.groupStats() != null) { + for (int i = 0; i < searchContext.groupStats().size(); i++) { + consumer.accept(groupStats(searchContext.groupStats().get(i))); + } + } + } + private StatsHolder groupStats(String group) { StatsHolder stats = groupsStats.get(group); if (stats == null) { @@ -184,26 +180,30 @@ public final class ShardSearchStats { public final MeanMetric queryMetric = new MeanMetric(); public final MeanMetric fetchMetric = new MeanMetric(); public final MeanMetric scrollMetric = new MeanMetric(); + public final MeanMetric suggestMetric = new MeanMetric(); public final CounterMetric queryCurrent = new CounterMetric(); public final CounterMetric fetchCurrent = new CounterMetric(); public final CounterMetric scrollCurrent = new CounterMetric(); + public final CounterMetric suggestCurrent = new CounterMetric(); public SearchStats.Stats stats() { return new SearchStats.Stats( queryMetric.count(), TimeUnit.NANOSECONDS.toMillis(queryMetric.sum()), queryCurrent.count(), fetchMetric.count(), TimeUnit.NANOSECONDS.toMillis(fetchMetric.sum()), fetchCurrent.count(), - scrollMetric.count(), TimeUnit.NANOSECONDS.toMillis(scrollMetric.sum()), scrollCurrent.count() + scrollMetric.count(), TimeUnit.NANOSECONDS.toMillis(scrollMetric.sum()), scrollCurrent.count(), + suggestMetric.count(), TimeUnit.NANOSECONDS.toMillis(suggestMetric.sum()), suggestCurrent.count() ); } public long totalCurrent() { - return queryCurrent.count() + fetchCurrent.count() + scrollCurrent.count(); + return queryCurrent.count() + fetchCurrent.count() + scrollCurrent.count() + suggestCurrent.count(); } public void clear() { queryMetric.clear(); fetchMetric.clear(); scrollMetric.clear(); + suggestMetric.clear(); } } } diff --git a/core/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java b/core/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java index 524266420fb..adae6caf452 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java @@ -49,7 +49,7 @@ import java.util.Map; * be stored as payloads to numeric doc values. 
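[Editor's note: the ShardSearchStats refactor above folds the repeated "update the total stats and every group's stats" loops into one computeStats(Consumer) helper, which also makes it easy to route suggest-only searches to the new suggest counters. A minimal standalone sketch of the pattern, with a simplified StatsHolder standing in for the class's inner holder:]

    import java.util.List;
    import java.util.function.Consumer;

    public class StatsSketch {
        static class StatsHolder { long queryCurrent; long suggestCurrent; }

        private final StatsHolder totalStats = new StatsHolder();
        private final List<StatsHolder> groupStats;

        StatsSketch(List<StatsHolder> groupStats) { this.groupStats = groupStats; }

        // Apply one update to the total holder and to each per-group holder.
        private void computeStats(Consumer<StatsHolder> consumer) {
            consumer.accept(totalStats);
            for (StatsHolder holder : groupStats) {
                consumer.accept(holder);
            }
        }

        void onPreQueryPhase(boolean hasOnlySuggest) {
            computeStats(holder -> {
                if (hasOnlySuggest) holder.suggestCurrent++; else holder.queryCurrent++;
            });
        }
    }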
*/ public final class ElasticsearchMergePolicy extends MergePolicy { - + private static ESLogger logger = Loggers.getLogger(ElasticsearchMergePolicy.class); private final MergePolicy delegate; @@ -69,7 +69,7 @@ public final class ElasticsearchMergePolicy extends MergePolicy { /** Return an "upgraded" view of the reader. */ static CodecReader filter(CodecReader reader) throws IOException { - // TODO: remove 0.90.x/1.x freqs/prox/payloads from _uid? + // TODO: remove 0.90.x/1.x freqs/prox/payloads from _uid? // the previous code never did this, so some indexes carry around trash. return reader; } @@ -155,7 +155,7 @@ public final class ElasticsearchMergePolicy extends MergePolicy { // TODO: Use IndexUpgradeMergePolicy instead. We should be comparing codecs, // for now we just assume every minor upgrade has a new format. - logger.debug("Adding segment " + info.info.name + " to be upgraded"); + logger.debug("Adding segment {} to be upgraded", info.info.name); spec.add(new OneMerge(Collections.singletonList(info))); } @@ -163,14 +163,14 @@ public final class ElasticsearchMergePolicy extends MergePolicy { if (spec.merges.size() == MAX_CONCURRENT_UPGRADE_MERGES) { // hit our max upgrades, so return the spec. we will get a cascaded call to continue. - logger.debug("Returning " + spec.merges.size() + " merges for upgrade"); + logger.debug("Returning {} merges for upgrade", spec.merges.size()); return spec; } } // We must have less than our max upgrade merges, so the next return will be our last in upgrading mode. if (spec.merges.isEmpty() == false) { - logger.debug("Returning " + spec.merges.size() + " merges for end of upgrade"); + logger.debug("Returning {} merges for end of upgrade", spec.merges.size()); return spec; } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java b/core/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java index 626e72acf41..e632c0669f6 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java @@ -33,12 +33,12 @@ public class IllegalIndexShardStateException extends ElasticsearchException { private final IndexShardState currentState; - public IllegalIndexShardStateException(ShardId shardId, IndexShardState currentState, String msg) { - this(shardId, currentState, msg, null); + public IllegalIndexShardStateException(ShardId shardId, IndexShardState currentState, String msg, Object... args) { + this(shardId, currentState, msg, null, args); } - public IllegalIndexShardStateException(ShardId shardId, IndexShardState currentState, String msg, Throwable ex) { - super("CurrentState[" + currentState + "] " + msg, ex); + public IllegalIndexShardStateException(ShardId shardId, IndexShardState currentState, String msg, Throwable ex, Object... 
args) { + super("CurrentState[" + currentState + "] " + msg, ex, args); setShard(shardId); this.currentState = currentState; } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 168f381d2d7..5e9d078a078 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -41,19 +41,18 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.Callback; import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.SuspendableRefContainer; -import org.elasticsearch.gateway.MetaDataStateFormat; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.SearchSlowLog; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.cache.IndexCache; @@ -83,9 +82,7 @@ import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.merge.MergeStats; -import org.elasticsearch.index.percolator.PercolateStats; -import org.elasticsearch.index.percolator.PercolatorQueriesRegistry; -import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.percolator.PercolatorFieldMapper; import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.search.stats.SearchStats; @@ -96,8 +93,6 @@ import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.Store.MetadataSnapshot; import org.elasticsearch.index.store.StoreFileMetaData; import org.elasticsearch.index.store.StoreStats; -import org.elasticsearch.index.suggest.stats.ShardSuggestMetric; -import org.elasticsearch.index.suggest.stats.SuggestStats; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.index.translog.TranslogStats; @@ -106,7 +101,6 @@ import org.elasticsearch.index.warmer.WarmerStats; import org.elasticsearch.indices.IndexingMemoryController; import org.elasticsearch.indices.recovery.RecoveryFailedException; import org.elasticsearch.indices.recovery.RecoveryState; -import org.elasticsearch.percolator.PercolatorService; import org.elasticsearch.search.suggest.completion.CompletionFieldStats; import org.elasticsearch.search.suggest.completion.CompletionStats; import org.elasticsearch.threadpool.ThreadPool; @@ -122,7 +116,6 @@ import java.util.List; import java.util.Map; import java.util.Objects; import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import 
java.util.concurrent.atomic.AtomicLong; @@ -140,9 +133,7 @@ public class IndexShard extends AbstractIndexShardComponent { private final ShardIndexWarmerService shardWarmerService; private final ShardRequestCache shardQueryCache; private final ShardFieldData shardFieldData; - private final PercolatorQueriesRegistry percolatorQueriesRegistry; private final IndexFieldDataService indexFieldDataService; - private final ShardSuggestMetric shardSuggestMetric = new ShardSuggestMetric(); private final ShardBitsetFilterCache shardBitsetFilterCache; private final Object mutex = new Object(); private final String checkIndexOnStartup; @@ -154,14 +145,16 @@ public class IndexShard extends AbstractIndexShardComponent { private final TranslogConfig translogConfig; private final IndexEventListener indexEventListener; - /** How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh. IndexingMemoryController polls this - * across all shards to decide if throttling is necessary because moving bytes to disk is falling behind vs incoming documents - * being indexed/deleted. */ + /** + * How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh. IndexingMemoryController polls this + * across all shards to decide if throttling is necessary because moving bytes to disk is falling behind vs incoming documents + * being indexed/deleted. + */ private final AtomicLong writingBytes = new AtomicLong(); - private volatile ScheduledFuture refreshScheduledFuture; protected volatile ShardRouting shardRouting; protected volatile IndexShardState state; + protected volatile long primaryTerm; protected final AtomicReference currentEngineReference = new AtomicReference<>(); protected final EngineFactory engineFactory; @@ -201,7 +194,8 @@ public class IndexShard extends AbstractIndexShardComponent { public IndexShard(ShardId shardId, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache, MapperService mapperService, SimilarityService similarityService, IndexFieldDataService indexFieldDataService, @Nullable EngineFactory engineFactory, - IndexEventListener indexEventListener, IndexSearcherWrapper indexSearcherWrapper, NodeServicesProvider provider, SearchSlowLog slowLog, Engine.Warmer warmer, IndexingOperationListener... listeners) { + IndexEventListener indexEventListener, IndexSearcherWrapper indexSearcherWrapper, ThreadPool threadPool, BigArrays bigArrays, + SearchSlowLog slowLog, Engine.Warmer warmer, IndexingOperationListener... listeners) { super(shardId, indexSettings); final Settings settings = indexSettings.getSettings(); this.codecService = new CodecService(mapperService, logger); @@ -212,7 +206,7 @@ public class IndexShard extends AbstractIndexShardComponent { this.engineFactory = engineFactory == null ? 
new InternalEngineFactory() : engineFactory; this.store = store; this.indexEventListener = indexEventListener; - this.threadPool = provider.getThreadPool(); + this.threadPool = threadPool; this.mapperService = mapperService; this.indexCache = indexCache; this.internalIndexingStats = new InternalIndexingStats(); @@ -233,7 +227,7 @@ public class IndexShard extends AbstractIndexShardComponent { this.checkIndexOnStartup = indexSettings.getValue(IndexSettings.INDEX_CHECK_ON_STARTUP); this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, - provider.getBigArrays()); + bigArrays); final QueryCachingPolicy cachingPolicy; // the query cache is a node-level thing, however we want the most popular filters // to be computed on a per-shard basis @@ -246,15 +240,16 @@ public class IndexShard extends AbstractIndexShardComponent { this.engineConfig = newEngineConfig(translogConfig, cachingPolicy); this.suspendableRefContainer = new SuspendableRefContainer(); this.searcherWrapper = indexSearcherWrapper; - QueryShardContext queryShardContext = new QueryShardContext(indexSettings, indexCache.bitsetFilterCache(), indexFieldDataService, mapperService, similarityService, provider.getScriptService(), provider.getIndicesQueriesRegistry()); - this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, queryShardContext); + this.primaryTerm = indexSettings.getIndexMetaData().primaryTerm(shardId.id()); } public Store store() { return this.store; } - /** returns true if this shard supports indexing (i.e., write) operations. */ + /** + * Returns true if this shard supports indexing (i.e., write) operations. + */ public boolean canIndex() { return true; } @@ -263,10 +258,6 @@ public class IndexShard extends AbstractIndexShardComponent { return this.getService; } - public ShardSuggestMetric getSuggestMetric() { - return shardSuggestMetric; - } - public ShardBitsetFilterCache shardBitsetFilterCache() { return shardBitsetFilterCache; } @@ -295,6 +286,30 @@ public class IndexShard extends AbstractIndexShardComponent { return this.shardFieldData; } + + /** + * Returns the primary term the index shard is on. See {@link org.elasticsearch.cluster.metadata.IndexMetaData#primaryTerm(int)} + */ + public long getPrimaryTerm() { + return this.primaryTerm; + } + + /** + * Notifies the shard of an increase in the primary term. + */ + public void updatePrimaryTerm(final long newTerm) { + synchronized (mutex) { + if (newTerm != primaryTerm) { + assert shardRouting.primary() == false : "a primary shard should never update its term. shard: " + shardRouting + + " current term [" + primaryTerm + "] new term [" + newTerm + "]"; + assert newTerm > primaryTerm : "primary terms can only go up. current [" + primaryTerm + "], new [" + newTerm + "]"; + primaryTerm = newTerm; + } + } + }
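The updatePrimaryTerm/getPrimaryTerm pair above encodes two invariants: an active primary never has its term changed from the outside, and terms only move forward. A minimal self-contained sketch of those invariants, with illustrative names rather than the actual IndexShard code:

    // Sketch: monotonic primary-term bookkeeping as enforced by updatePrimaryTerm above.
    // All names here are illustrative; only the invariants mirror the hunk.
    public class PrimaryTermTracker {
        private final Object mutex = new Object();
        private final boolean primary;
        private volatile long primaryTerm;

        public PrimaryTermTracker(boolean primary, long initialTerm) {
            this.primary = primary;
            this.primaryTerm = initialTerm;
        }

        /** Applies a term from a new cluster state: only replicas move, and only forward. */
        public void updatePrimaryTerm(long newTerm) {
            synchronized (mutex) {
                if (newTerm != primaryTerm) {
                    if (primary) {
                        throw new IllegalStateException("a primary shard should never update its term");
                    }
                    if (newTerm < primaryTerm) {
                        throw new IllegalStateException(
                            "primary terms can only go up. current [" + primaryTerm + "], new [" + newTerm + "]");
                    }
                    primaryTerm = newTerm;
                }
            }
        }

        public long getPrimaryTerm() {
            return primaryTerm; // volatile read, safe without the mutex
        }
    }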
/** * Returns the latest cluster routing entry received with this shard. Might be null if the * shard was just created. @@ -313,12 +328,12 @@ * unless explicitly disabled. * * @throws IndexShardRelocatedException if shard is marked as relocated and relocation aborted - * @throws IOException if shard state could not be persisted + * @throws IOException if shard state could not be persisted */ public void updateRoutingEntry(final ShardRouting newRouting, final boolean persistState) throws IOException { final ShardRouting currentRouting = this.shardRouting; if (!newRouting.shardId().equals(shardId())) { - throw new IllegalArgumentException("Trying to set a routing entry with shardId [" + newRouting.shardId() + "] on a shard with shardId [" + shardId() + "]"); + throw new IllegalArgumentException("Trying to set a routing entry with shardId " + newRouting.shardId() + " on a shard with shardId " + shardId()); } if ((currentRouting == null || newRouting.isSameAllocation(currentRouting)) == false) { throw new IllegalArgumentException("Trying to set a routing entry with a different allocation. Current " + currentRouting + ", new " + newRouting); } @@ -435,9 +450,7 @@ public class IndexShard extends AbstractIndexShardComponent { public Engine.Index prepareIndexOnPrimary(SourceToParse source, long version, VersionType versionType) { try { - if (shardRouting.primary() == false) { - throw new IllegalIndexShardStateException(shardId, state, "shard is not a primary"); - } + verifyPrimary(); return prepareIndex(docMapper(source.type()), source, version, versionType, Engine.Operation.Origin.PRIMARY); } catch (Throwable t) { verifyNotClosed(t); @@ -447,6 +460,7 @@ public Engine.Index prepareIndexOnReplica(SourceToParse source, long version, VersionType versionType) { try { + verifyReplicationTarget(); return prepareIndex(docMapper(source.type()), source, version, versionType, Engine.Operation.Origin.REPLICA); } catch (Throwable t) { verifyNotClosed(t); @@ -476,12 +490,8 @@ if (logger.isTraceEnabled()) { logger.trace("index [{}][{}]{}", index.type(), index.id(), index.docs()); } - final boolean isPercolatorQuery = percolatorQueriesRegistry.isPercolatorQuery(index); Engine engine = getEngine(); created = engine.index(index); - if (isPercolatorQuery) { - percolatorQueriesRegistry.updatePercolateQuery(engine, index.id()); - } index.endTime(System.nanoTime()); } catch (Throwable ex) { indexingOperationListeners.postIndex(index, ex); @@ -494,9 +504,7 @@ } public Engine.Delete prepareDeleteOnPrimary(String type, String id, long version, VersionType versionType) { - if (shardRouting.primary() == false) { - throw new IllegalIndexShardStateException(shardId, state, "shard is not a primary"); - } + verifyPrimary(); final DocumentMapper documentMapper = docMapper(type).getDocumentMapper(); return prepareDelete(type, id, documentMapper.uidMapper().term(Uid.createUid(type, id)), version, versionType, Engine.Operation.Origin.PRIMARY); } @@ -519,12 +527,8 @@ if (logger.isTraceEnabled()) { logger.trace("delete [{}]", delete.uid().text()); } - final boolean isPercolatorQuery = percolatorQueriesRegistry.isPercolatorQuery(delete); Engine engine = getEngine(); engine.delete(delete); - if (isPercolatorQuery) { - percolatorQueriesRegistry.updatePercolateQuery(engine, delete.id()); - } delete.endTime(System.nanoTime()); } catch (Throwable ex) { indexingOperationListeners.postDelete(delete, ex); @@ -539,7 +543,9 @@ public class IndexShard extends 
AbstractIndexShardComponent { return getEngine().get(get, this::acquireSearcher); } - /** Writes all indexing changes to disk and opens a new searcher reflecting all changes. This can throw {@link EngineClosedException}. */ + /** + * Writes all indexing changes to disk and opens a new searcher reflecting all changes. This can throw {@link EngineClosedException}. + */ public void refresh(String source) { verifyNotClosed(); if (canIndex()) { @@ -562,7 +568,9 @@ public class IndexShard extends AbstractIndexShardComponent { } } - /** Returns how many bytes we are currently moving from heap to disk */ + /** + * Returns how many bytes we are currently moving from heap to disk + */ public long getWritingBytes() { return writingBytes.get(); } @@ -644,18 +652,10 @@ public class IndexShard extends AbstractIndexShardComponent { return shardFieldData.stats(fields); } - public PercolatorQueriesRegistry percolateRegistry() { - return percolatorQueriesRegistry; - } - public TranslogStats translogStats() { return getEngine().getTranslog().stats(); } - public SuggestStats suggestStats() { - return shardSuggestMetric.stats(); - } - public CompletionStats completionStats(String... fields) { CompletionStats completionStats = new CompletionStats(); try (final Engine.Searcher currentSearcher = acquireSearcher("completion_stats")) { @@ -712,7 +712,7 @@ public class IndexShard extends AbstractIndexShardComponent { false, true, upgrade.upgradeOnlyAncientSegments()); org.apache.lucene.util.Version version = minimumCompatibleVersion(); if (logger.isTraceEnabled()) { - logger.trace("upgraded segment {} from version {} to version {}", previousVersion, version); + logger.trace("upgraded segments for {} from version {} to version {}", shardId, previousVersion, version); } return version; @@ -790,10 +790,6 @@ public class IndexShard extends AbstractIndexShardComponent { public void close(String reason, boolean flushEngine) throws IOException { synchronized (mutex) { try { - if (state != IndexShardState.CLOSED) { - FutureUtils.cancel(refreshScheduledFuture); - refreshScheduledFuture = null; - } changeState(IndexShardState.CLOSED, reason); } finally { final Engine engine = this.currentEngineReference.getAndSet(null); @@ -802,18 +798,15 @@ public class IndexShard extends AbstractIndexShardComponent { engine.flushAndClose(); } } finally { // playing safe here and close the engine even if the above succeeds - close can be called multiple times - IOUtils.close(engine, percolatorQueriesRegistry); + IOUtils.close(engine); } } } } public IndexShard postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException { - if (mapperService.hasMapping(PercolatorService.TYPE_NAME)) { + if (mapperService.hasMapping(PercolatorFieldMapper.TYPE_NAME)) { refresh("percolator_load_queries"); - try (Engine.Searcher searcher = getEngine().acquireSearcher("percolator_load_queries")) { - this.percolatorQueriesRegistry.loadQueries(searcher.reader()); - } } synchronized (mutex) { if (state == IndexShardState.CLOSED) { @@ -985,6 +978,22 @@ public class IndexShard extends AbstractIndexShardComponent { } } + private void verifyPrimary() { + if (shardRouting.primary() == false) { + // must use exception that is not ignored by replication logic. 
See TransportActions.isShardNotAvailableException + throw new IllegalStateException("shard is not a primary " + shardRouting); + } + + private void verifyReplicationTarget() { + final IndexShardState state = state(); + if (shardRouting.primary() && shardRouting.active() && state != IndexShardState.RELOCATED) { + // must use exception that is not ignored by replication logic. See TransportActions.isShardNotAvailableException + throw new IllegalStateException("active primary shard cannot be a replication target before relocation hand off " + + shardRouting + ", state is [" + state + "]"); + } + } + protected final void verifyStartedOrRecovering() throws IllegalIndexShardStateException { IndexShardState state = this.state; // one time volatile read if (state != IndexShardState.STARTED && state != IndexShardState.RECOVERING && state != IndexShardState.POST_RECOVERY) { @@ -1014,7 +1023,9 @@ } } - /** Returns number of heap bytes used by the indexing buffer for this shard, or 0 if the shard is closed */ + /** + * Returns number of heap bytes used by the indexing buffer for this shard, or 0 if the shard is closed + */ public long getIndexBufferRAMBytesUsed() { Engine engine = getEngineOrNull(); if (engine == null) { @@ -1031,8 +1042,10 @@ this.shardEventListener.delegates.add(onShardFailure); } - /** Called by {@link IndexingMemoryController} to check whether more than {@code inactiveTimeNS} has passed since the last - * indexing operation, and notify listeners that we are now inactive so e.g. sync'd flush can happen. */ + /** + * Called by {@link IndexingMemoryController} to check whether more than {@code inactiveTimeNS} has passed since the last + * indexing operation, and notify listeners that we are now inactive so e.g. sync'd flush can happen. + */ public void checkIdle(long inactiveTimeNS) { Engine engineOrNull = getEngineOrNull(); if (engineOrNull != null && System.nanoTime() - engineOrNull.getLastWriteNanos() >= inactiveTimeNS) { @@ -1110,10 +1123,6 @@ return getEngine().getTranslog(); } - public PercolateStats percolateStats() { - return percolatorQueriesRegistry.stats(); - } - public IndexEventListener getIndexEventListener() { return indexEventListener; } @@ -1181,11 +1190,12 @@ } } catch (Exception e) { handleRefreshException(e); - }; + } } /** * Should be called for each no-op update operation to increment relevant statistics. + * * @param type the doc type of the update */ public void noopUpdate(String type) { @@ -1385,14 +1395,22 @@ public Releasable acquirePrimaryOperationLock() { verifyNotClosed(); - if (shardRouting.primary() == false) { - throw new IllegalIndexShardStateException(shardId, state, "shard is not a primary"); - } + verifyPrimary(); return suspendableRefContainer.acquireUninterruptibly(); } - public Releasable acquireReplicaOperationLock() { + /** + * Acquires an operation lock on a replica shard. If the given primary term is lower than the shard's current + * primary term, an {@link IllegalArgumentException} is thrown. + */ + public Releasable acquireReplicaOperationLock(long opPrimaryTerm) { verifyNotClosed(); + verifyReplicationTarget(); + if (primaryTerm > opPrimaryTerm) { + // must use exception that is not ignored by replication logic. See TransportActions.isShardNotAvailableException + throw new IllegalArgumentException(LoggerMessageFormat.format("{} operation term [{}] is too old (current [{}])", + shardId, opPrimaryTerm, primaryTerm)); + } return suspendableRefContainer.acquireUninterruptibly(); }
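acquireReplicaOperationLock above is the replica-side gate: an operation stamped with an older primary term than the shard has already seen must be rejected rather than retried. A standalone sketch of just that check (illustrative class; the real method additionally acquires a reference from SuspendableRefContainer, which is elided here):

    // Sketch: the stale-term rejection performed inside acquireReplicaOperationLock.
    public final class ReplicaTermGuard {
        private volatile long primaryTerm;

        public ReplicaTermGuard(long currentTerm) {
            this.primaryTerm = currentTerm;
        }

        /** Called with the primary term carried by an incoming replicated operation. */
        public void verifyOperationTerm(long opPrimaryTerm) {
            if (primaryTerm > opPrimaryTerm) {
                // IllegalArgumentException on purpose: it is not one of the exceptions
                // the replication logic treats as "shard not available", so the stale
                // operation fails outright instead of being rerouted.
                throw new IllegalArgumentException(
                    "operation term [" + opPrimaryTerm + "] is too old (current [" + primaryTerm + "])");
            }
        }
    }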
@@ -1496,7 +1514,7 @@ * Returns true iff one or more changes to the engine are not visible via the current searcher. * Otherwise false. * - * @throws EngineClosedException if the engine is already closed + * @throws EngineClosedException if the engine is already closed * @throws AlreadyClosedException if the internal indexwriter in the engine is already closed */ public boolean isRefreshNeeded() { diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java index 5518d1b1273..774052b3a5f 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java @@ -20,8 +20,8 @@ package org.elasticsearch.index.shard; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.cache.IndexCache; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; @@ -33,6 +33,7 @@ import org.elasticsearch.index.SearchSlowLog; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.TranslogStats; +import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; @@ -44,9 +45,13 @@ import java.io.IOException; */ public final class ShadowIndexShard extends IndexShard { - public ShadowIndexShard(ShardId shardId, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache, MapperService mapperService, SimilarityService similarityService, IndexFieldDataService indexFieldDataService, @Nullable EngineFactory engineFactory, - IndexEventListener indexEventListener, IndexSearcherWrapper wrapper, NodeServicesProvider provider, SearchSlowLog searchSlowLog, Engine.Warmer engineWarmer) throws IOException { - super(shardId, indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldDataService, engineFactory, indexEventListener, wrapper, provider, searchSlowLog, engineWarmer); + public ShadowIndexShard(ShardId shardId, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache, + MapperService mapperService, SimilarityService similarityService, IndexFieldDataService indexFieldDataService, + @Nullable EngineFactory engineFactory, IndexEventListener indexEventListener, IndexSearcherWrapper wrapper, + ThreadPool threadPool, BigArrays bigArrays, SearchSlowLog searchSlowLog, Engine.Warmer engineWarmer) + throws IOException { + super(shardId, indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldDataService, engineFactory, + indexEventListener, wrapper, threadPool, bigArrays, searchSlowLog, engineWarmer); } /** diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShardId.java b/core/src/main/java/org/elasticsearch/index/shard/ShardId.java index 3dea5501c62..a9bc63ae44f 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShardId.java +++ 
b/core/src/main/java/org/elasticsearch/index/shard/ShardId.java @@ -98,7 +98,7 @@ public class ShardId implements Streamable, Comparable { @Override public void readFrom(StreamInput in) throws IOException { - index = Index.readIndex(in); + index = new Index(in); shardId = in.readVInt(); hashCode = computeHashCode(); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java b/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java index 3d6fbf08102..be0d51bd2b6 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShardPath.java @@ -29,7 +29,6 @@ import java.io.IOException; import java.nio.file.FileStore; import java.nio.file.Files; import java.nio.file.Path; -import java.util.HashMap; import java.util.Map; public final class ShardPath { @@ -37,22 +36,20 @@ public final class ShardPath { public static final String TRANSLOG_FOLDER_NAME = "translog"; private final Path path; - private final String indexUUID; private final ShardId shardId; private final Path shardStatePath; private final boolean isCustomDataPath; - public ShardPath(boolean isCustomDataPath, Path dataPath, Path shardStatePath, String indexUUID, ShardId shardId) { + public ShardPath(boolean isCustomDataPath, Path dataPath, Path shardStatePath, ShardId shardId) { assert dataPath.getFileName().toString().equals(Integer.toString(shardId.id())) : "dataPath must end with the shard ID but didn't: " + dataPath.toString(); assert shardStatePath.getFileName().toString().equals(Integer.toString(shardId.id())) : "shardStatePath must end with the shard ID but didn't: " + dataPath.toString(); - assert dataPath.getParent().getFileName().toString().equals(shardId.getIndexName()) : "dataPath must end with index/shardID but didn't: " + dataPath.toString(); - assert shardStatePath.getParent().getFileName().toString().equals(shardId.getIndexName()) : "shardStatePath must end with index/shardID but didn't: " + dataPath.toString(); + assert dataPath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID()) : "dataPath must end with index path id but didn't: " + dataPath.toString(); + assert shardStatePath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID()) : "shardStatePath must end with index path id but didn't: " + dataPath.toString(); if (isCustomDataPath && dataPath.equals(shardStatePath)) { throw new IllegalArgumentException("shard state path must be different to the data path when using custom data paths"); } this.isCustomDataPath = isCustomDataPath; this.path = dataPath; - this.indexUUID = indexUUID; this.shardId = shardId; this.shardStatePath = shardStatePath; } @@ -73,10 +70,6 @@ public final class ShardPath { return Files.exists(path); } - public String getIndexUUID() { - return indexUUID; - } - public ShardId getShardId() { return shardId; } @@ -144,7 +137,7 @@ public final class ShardPath { dataPath = statePath; } logger.debug("{} loaded data path [{}], state path [{}]", shardId, dataPath, statePath); - return new ShardPath(indexSettings.hasCustomDataPath(), dataPath, statePath, indexUUID, shardId); + return new ShardPath(indexSettings.hasCustomDataPath(), dataPath, statePath, shardId); } } @@ -168,34 +161,6 @@ public final class ShardPath { } } - /** Maps each path.data path to a "guess" of how many bytes the shards allocated to that path might additionally use over their - * lifetime; we do this so a bunch of newly allocated shards won't just all go the path with the most free space at this 
moment. */ - private static Map getEstimatedReservedBytes(NodeEnvironment env, long avgShardSizeInBytes, Iterable shards) throws IOException { - long totFreeSpace = 0; - for (NodeEnvironment.NodePath nodePath : env.nodePaths()) { - totFreeSpace += nodePath.fileStore.getUsableSpace(); - } - - // Very rough heuristic of how much disk space we expect the shard will use over its lifetime, the max of current average - // shard size across the cluster and 5% of the total available free space on this node: - long estShardSizeInBytes = Math.max(avgShardSizeInBytes, (long) (totFreeSpace/20.0)); - - // Collate predicted (guessed!) disk usage on each path.data: - Map reservedBytes = new HashMap<>(); - for (IndexShard shard : shards) { - Path dataPath = NodeEnvironment.shardStatePathToDataPath(shard.shardPath().getShardStatePath()); - - // Remove indices// subdirs from the statePath to get back to the path.data/: - Long curBytes = reservedBytes.get(dataPath); - if (curBytes == null) { - curBytes = 0L; - } - reservedBytes.put(dataPath, curBytes + estShardSizeInBytes); - } - - return reservedBytes; - } - public static ShardPath selectNewPathForShard(NodeEnvironment env, ShardId shardId, IndexSettings indexSettings, long avgShardSizeInBytes, Map dataPathToShardCount) throws IOException { @@ -206,7 +171,6 @@ dataPath = env.resolveCustomLocation(indexSettings, shardId); statePath = env.nodePaths()[0].resolve(shardId); } else { - long totFreeSpace = 0; for (NodeEnvironment.NodePath nodePath : env.nodePaths()) { totFreeSpace += nodePath.fileStore.getUsableSpace(); @@ -241,9 +205,7 @@ statePath = bestPath.resolve(shardId); dataPath = statePath; } - - final String indexUUID = indexSettings.getUUID(); - return new ShardPath(indexSettings.hasCustomDataPath(), dataPath, statePath, indexUUID, shardId); + return new ShardPath(indexSettings.hasCustomDataPath(), dataPath, statePath, shardId); }
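With the getEstimatedReservedBytes heuristic deleted, what remains of selectNewPathForShard, as far as these hunks show, is picking the data path with the most usable space. A rough standalone sketch of that selection, using plain java.nio types in place of NodeEnvironment.NodePath (paths are assumed to exist):

    // Sketch: choose the candidate data path with the most usable disk space.
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.List;

    final class PathPicker {
        static Path selectPathWithMostFreeSpace(List<Path> dataPaths) throws IOException {
            Path best = null;
            long bestUsable = -1;
            for (Path candidate : dataPaths) {
                // usable space as reported by the file store backing this path
                long usable = Files.getFileStore(candidate).getUsableSpace();
                if (usable > bestUsable) {
                    bestUsable = usable;
                    best = candidate;
                }
            }
            return best;
        }
    }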
@Override @@ -258,9 +220,6 @@ if (shardId != null ? !shardId.equals(shardPath.shardId) : shardPath.shardId != null) { return false; } - if (indexUUID != null ? !indexUUID.equals(shardPath.indexUUID) : shardPath.indexUUID != null) { - return false; - } if (path != null ? !path.equals(shardPath.path) : shardPath.path != null) { return false; } @@ -271,7 +230,6 @@ @Override public int hashCode() { int result = path != null ? path.hashCode() : 0; - result = 31 * result + (indexUUID != null ? indexUUID.hashCode() : 0); result = 31 * result + (shardId != null ? shardId.hashCode() : 0); return result; } @@ -280,7 +238,6 @@ public String toString() { return "ShardPath{" + "path=" + path + - ", indexUUID='" + indexUUID + '\'' + ", shard=" + shardId + '}'; } diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java b/core/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java index 315371c7286..407f271fd65 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java @@ -102,7 +102,7 @@ public final class ShardStateMetaData { return "version [" + legacyVersion + "], primary [" + primary + "], allocation [" + allocationId + "]"; } - public static final MetaDataStateFormat FORMAT = new MetaDataStateFormat(XContentType.JSON, SHARD_STATE_FILE_PREFIX) { + public static final MetaDataStateFormat FORMAT = new MetaDataStateFormat(XContentType.SMILE, SHARD_STATE_FILE_PREFIX) { @Override protected XContentBuilder newXContentBuilder(XContentType type, OutputStream stream) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index e057349223d..d11e6734025 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -128,9 +128,8 @@ final class StoreRecovery { assert shardState != IndexShardState.CREATED && shardState != IndexShardState.RECOVERING : "recovery process of " + shardId + " didn't get to post_recovery. shardState [" + shardState + "]"; if (logger.isTraceEnabled()) { - StringBuilder sb = new StringBuilder(); - sb.append("recovery completed from ").append("shard_store").append(", took [").append(timeValueMillis(recoveryState.getTimer().time())).append("]\n"); RecoveryState.Index index = recoveryState.getIndex(); + StringBuilder sb = new StringBuilder(); sb.append(" index : files [").append(index.totalFileCount()).append("] with total_size [") .append(new ByteSizeValue(index.totalBytes())).append("], took[") .append(TimeValue.timeValueMillis(index.time())).append("]\n"); @@ -142,7 +141,7 @@ final class StoreRecovery { .append(timeValueMillis(recoveryState.getVerifyIndex().checkIndexTime())).append("]\n"); sb.append(" translog : number_of_operations [").append(recoveryState.getTranslog().recoveredOperations()) .append("], took [").append(TimeValue.timeValueMillis(recoveryState.getTranslog().time())).append("]"); - logger.trace(sb.toString()); + logger.trace("recovery completed from [shard_store], took [{}]\n{}", timeValueMillis(recoveryState.getTimer().time()), sb); } else if (logger.isDebugEnabled()) { logger.debug("recovery completed from [shard_store], took [{}]", timeValueMillis(recoveryState.getTimer().time())); } diff --git a/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java b/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java index e950ebda1b3..865c0eb685b 100644 --- a/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java +++ b/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper; import org.apache.lucene.search.similarities.Similarity; +import org.elasticsearch.Version; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexModule; @@ -63,6 +64,10 @@ public final class SimilarityService extends AbstractIndexComponent { Map similaritySettings = this.indexSettings.getSettings().getGroups(IndexModule.SIMILARITY_SETTINGS_PREFIX); for (Map.Entry entry : similaritySettings.entrySet()) { String name = entry.getKey(); + // Starting with v5.0 indices, it should no longer be possible to redefine built-in similarities + if (BUILT_IN.containsKey(name) && indexSettings.getIndexVersionCreated().onOrAfter(Version.V_5_0_0_alpha1)) { + throw new IllegalArgumentException("Cannot redefine built-in Similarity [" + name + "]"); + } Settings settings = entry.getValue(); String typeName = settings.get("type"); if (typeName == null) { @@ -76,9 +81,16 @@ } providers.put(name, factory.apply(name, settings)); } - addSimilarities(similaritySettings, providers, DEFAULTS); + for (Map.Entry entry : addSimilarities(similaritySettings, DEFAULTS).entrySet()) { + // Avoid overwriting custom providers for indices older than v5.0 + if (providers.containsKey(entry.getKey()) && indexSettings.getIndexVersionCreated().before(Version.V_5_0_0_alpha1)) { + continue; + } + providers.put(entry.getKey(), entry.getValue()); + } this.similarities = providers; - defaultSimilarity = providers.get(SimilarityService.DEFAULT_SIMILARITY).get(); + defaultSimilarity = (providers.get("default") != null) ? providers.get("default").get() + : providers.get(SimilarityService.DEFAULT_SIMILARITY).get(); // Expert users can configure the base type as being different to default, but out-of-box we use default. baseSimilarity = (providers.get("base") != null) ? providers.get("base").get() : defaultSimilarity; @@ -90,7 +102,9 @@ defaultSimilarity; } - private void addSimilarities(Map similaritySettings, Map providers, Map> similarities) { + private Map addSimilarities(Map similaritySettings, + Map> similarities) { + Map providers = new HashMap<>(similarities.size()); for (Map.Entry> entry : similarities.entrySet()) { String name = entry.getKey(); BiFunction factory = entry.getValue(); @@ -100,12 +114,17 @@ } providers.put(name, factory.apply(name, settings)); } + return providers; } public SimilarityProvider getSimilarity(String name) { return similarities.get(name); } + public SimilarityProvider getDefaultSimilarity() { + return similarities.get("default"); + } + static class PerFieldSimilarity extends PerFieldSimilarityWrapper { private final Similarity defaultSimilarity;
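The SimilarityService changes above gate redefinition of built-in similarity names on the version the index was created with. A compact sketch of that guard in isolation, where BUILT_IN and the boolean flag are stand-ins for SimilarityService.BUILT_IN and the index-created version check:

    // Sketch: reject configuring a similarity under a built-in name for 5.0+ indices.
    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    final class SimilarityNameGuard {
        // illustrative subset of built-in similarity names
        private static final Set<String> BUILT_IN = new HashSet<>(Arrays.asList("classic", "BM25"));

        static void checkName(String name, boolean createdOnOrAfter5x) {
            if (BUILT_IN.contains(name) && createdOnOrAfter5x) {
                throw new IllegalArgumentException("Cannot redefine built-in Similarity [" + name + "]");
            }
        }
    }

Older indices keep working because the guard only fires for indices created on or after the 5.0 alpha version, while the companion loop skips overwriting pre-5.0 custom providers.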
diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java index 330787a68a3..c15d2cfcdbe 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java @@ -32,9 +32,9 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.SnapshotId; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; diff --git a/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java b/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java index 06bc6a84a88..584b98cff33 100644 --- a/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java +++ b/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java @@ -36,7 +36,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; @@ -61,8 +61,9 @@ public class FsDirectoryService extends DirectoryService implements StoreRateLim return SimpleFSLockFactory.INSTANCE; default: throw new IllegalArgumentException("unrecognized [index.store.fs.fs_lock] \"" + s + "\": must be native or simple"); - } - }, false, Setting.Scope.INDEX); + } // can be set on both node and index level; some nodes might be running on NFS, so they might need simple rather than native + }, Property.IndexScope, Property.NodeScope); + private final CounterMetric rateLimitingTimeInNanos = new CounterMetric(); private final ShardPath path; @@ -108,7 +109,8 @@ protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { - final String storeType = indexSettings.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.DEFAULT.getSettingsKey()); + final String storeType = indexSettings.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), + IndexModule.Type.DEFAULT.getSettingsKey()); if (IndexModule.Type.FS.match(storeType) || IndexModule.Type.DEFAULT.match(storeType)) { final FSDirectory open = FSDirectory.open(location, lockFactory); // use lucene defaults if (open instanceof MMapDirectory && Constants.WINDOWS == false) {
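From here on, the recurring change is mechanical: the old Setting constructors took a dynamic boolean plus a single Setting.Scope, while the new ones take varargs Setting.Property flags, which is what lets a setting such as the fs_lock one above be both index- and node-scoped at once. A toy illustration of the varargs-flags shape (not the Elasticsearch Setting API itself):

    // Sketch: replacing a (dynamic, scope) pair with varargs flags.
    import java.util.EnumSet;

    final class FlaggedSetting {
        enum Property { Dynamic, IndexScope, NodeScope }

        private final String key;
        private final EnumSet<Property> properties;

        FlaggedSetting(String key, Property... properties) {
            this.key = key;
            this.properties = EnumSet.noneOf(Property.class);
            for (Property p : properties) {
                this.properties.add(p);
            }
        }

        boolean isDynamic() {
            return properties.contains(Property.Dynamic);
        }

        public static void main(String[] args) {
            // a setting can now claim several scopes at once, as fs_lock does above
            FlaggedSetting s = new FlaggedSetting("index.store.fs.fs_lock",
                Property.IndexScope, Property.NodeScope);
            System.out.println(s.key + " dynamic=" + s.isDynamic());
        }
    }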
diff --git a/core/src/main/java/org/elasticsearch/index/store/IndexStore.java b/core/src/main/java/org/elasticsearch/index/store/IndexStore.java index e98ad7cc6eb..9e01d871765 100644 --- a/core/src/main/java/org/elasticsearch/index/store/IndexStore.java +++ b/core/src/main/java/org/elasticsearch/index/store/IndexStore.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.store; import org.apache.lucene.store.StoreRateLimiting; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; @@ -29,8 +30,12 @@ import org.elasticsearch.index.shard.ShardPath; * */ public class IndexStore extends AbstractIndexComponent { - public static final Setting INDEX_STORE_THROTTLE_TYPE_SETTING = new Setting<>("index.store.throttle.type", "none", IndexRateLimitingType::fromString, true, Setting.Scope.INDEX) ; - public static final Setting INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("index.store.throttle.max_bytes_per_sec", new ByteSizeValue(0), true, Setting.Scope.INDEX); + public static final Setting INDEX_STORE_THROTTLE_TYPE_SETTING = + new Setting<>("index.store.throttle.type", "none", IndexRateLimitingType::fromString, + Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING = + Setting.byteSizeSetting("index.store.throttle.max_bytes_per_sec", new ByteSizeValue(0), + Property.Dynamic, Property.IndexScope); protected final IndexStoreConfig indexStoreConfig; private final StoreRateLimiting rateLimiting = new StoreRateLimiting(); diff --git a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java index ab7075afa5b..12558bb9554 100644 --- a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java +++ b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java @@ -22,6 +22,7 @@ import org.apache.lucene.store.StoreRateLimiting; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -36,11 +37,15 @@ public class IndexStoreConfig { /** * Configures the node / cluster level throttle type. See {@link StoreRateLimiting.Type}. */ - public static final Setting INDICES_STORE_THROTTLE_TYPE_SETTING = new Setting<>("indices.store.throttle.type", StoreRateLimiting.Type.NONE.name(),StoreRateLimiting.Type::fromString, true, Setting.Scope.CLUSTER); + public static final Setting INDICES_STORE_THROTTLE_TYPE_SETTING = + new Setting<>("indices.store.throttle.type", StoreRateLimiting.Type.NONE.name(),StoreRateLimiting.Type::fromString, + Property.Dynamic, Property.NodeScope); /** * Configures the node / cluster level throttle intensity. 
The default is 10240 MB */ - public static final Setting INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.store.throttle.max_bytes_per_sec", new ByteSizeValue(0), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING = + Setting.byteSizeSetting("indices.store.throttle.max_bytes_per_sec", new ByteSizeValue(0), + Property.Dynamic, Property.NodeScope); private volatile StoreRateLimiting.Type rateLimitingType; private volatile ByteSizeValue rateLimitingThrottle; private final StoreRateLimiting rateLimiting = new StoreRateLimiting(); diff --git a/core/src/main/java/org/elasticsearch/index/store/Store.java b/core/src/main/java/org/elasticsearch/index/store/Store.java index 77e7f32f5f5..e0ed3bc98b7 100644 --- a/core/src/main/java/org/elasticsearch/index/store/Store.java +++ b/core/src/main/java/org/elasticsearch/index/store/Store.java @@ -49,7 +49,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -61,6 +60,7 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.Callback; @@ -90,7 +90,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.zip.Adler32; import java.util.zip.CRC32; import java.util.zip.Checksum; @@ -124,7 +123,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref static final int VERSION_START = 0; static final int VERSION = VERSION_WRITE_THROWABLE; static final String CORRUPTED = "corrupted_"; - public static final Setting INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING = Setting.timeSetting("index.store.stats_refresh_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.INDEX); + public static final Setting INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING = + Setting.timeSetting("index.store.stats_refresh_interval", TimeValue.timeValueSeconds(10), Property.IndexScope); private final AtomicBoolean isClosed = new AtomicBoolean(false); private final StoreDirectory directory; @@ -379,7 +379,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref if (isClosed.compareAndSet(false, true)) { // only do this once! decRef(); - logger.debug("store reference count on close: " + refCounter.refCount()); + logger.debug("store reference count on close: {}", refCounter.refCount()); } } diff --git a/core/src/main/java/org/elasticsearch/index/suggest/stats/ShardSuggestMetric.java b/core/src/main/java/org/elasticsearch/index/suggest/stats/ShardSuggestMetric.java deleted file mode 100644 index 750d7de7b22..00000000000 --- a/core/src/main/java/org/elasticsearch/index/suggest/stats/ShardSuggestMetric.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.suggest.stats; - -import org.elasticsearch.common.metrics.CounterMetric; -import org.elasticsearch.common.metrics.MeanMetric; - -import java.util.concurrent.TimeUnit; - -/** - * - */ -public final class ShardSuggestMetric { - private final MeanMetric suggestMetric = new MeanMetric(); - private final CounterMetric currentMetric = new CounterMetric(); - - /** - * Called before suggest - */ - public void preSuggest() { - currentMetric.inc(); - } - - /** - * Called after suggest - * @param tookInNanos time of suggest used in nanos - */ - public void postSuggest(long tookInNanos) { - currentMetric.dec(); - suggestMetric.inc(tookInNanos); - } - - /** - * @return The current stats - */ - public SuggestStats stats() { - return new SuggestStats(suggestMetric.count(), TimeUnit.NANOSECONDS.toMillis(suggestMetric.sum()), currentMetric.count()); - } -} diff --git a/core/src/main/java/org/elasticsearch/index/suggest/stats/SuggestStats.java b/core/src/main/java/org/elasticsearch/index/suggest/stats/SuggestStats.java deleted file mode 100644 index 1183a64833a..00000000000 --- a/core/src/main/java/org/elasticsearch/index/suggest/stats/SuggestStats.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.suggest.stats; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentBuilderString; - -import java.io.IOException; - -/** - * Exposes suggest related statistics. 
- */ -public class SuggestStats implements Streamable, ToXContent { - - private long suggestCount; - private long suggestTimeInMillis; - private long current; - - public SuggestStats() { - } - - SuggestStats(long suggestCount, long suggestTimeInMillis, long current) { - this.suggestCount = suggestCount; - this.suggestTimeInMillis = suggestTimeInMillis; - this.current = current; - } - - /** - * @return The number of times the suggest api has been invoked. - */ - public long getCount() { - return suggestCount; - } - - /** - * @return The total amount of time spend in the suggest api - */ - public long getTimeInMillis() { - return suggestTimeInMillis; - } - - /** - * @return The total amount of time spend in the suggest api - */ - public TimeValue getTime() { - return new TimeValue(getTimeInMillis()); - } - - /** - * @return The total amount of active suggest api invocations. - */ - public long getCurrent() { - return current; - } - - public void add(SuggestStats suggestStats) { - if (suggestStats != null) { - suggestCount += suggestStats.getCount(); - suggestTimeInMillis += suggestStats.getTimeInMillis(); - current += suggestStats.getCurrent(); - } - } - - public static SuggestStats readSuggestStats(StreamInput in) throws IOException { - SuggestStats stats = new SuggestStats(); - stats.readFrom(in); - return stats; - } - - static final class Fields { - static final XContentBuilderString SUGGEST = new XContentBuilderString("suggest"); - static final XContentBuilderString TOTAL = new XContentBuilderString("total"); - static final XContentBuilderString TIME = new XContentBuilderString("time"); - static final XContentBuilderString TIME_IN_MILLIS = new XContentBuilderString("time_in_millis"); - static final XContentBuilderString CURRENT = new XContentBuilderString("current"); - } - - - @Override - public void readFrom(StreamInput in) throws IOException { - suggestCount = in.readVLong(); - suggestTimeInMillis = in.readVLong(); - current = in.readVLong(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(suggestCount); - out.writeVLong(suggestTimeInMillis); - out.writeVLong(current); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(Fields.SUGGEST); - builder.field(Fields.TOTAL, suggestCount); - builder.timeValueField(Fields.TIME_IN_MILLIS, Fields.TIME, suggestTimeInMillis); - builder.field(Fields.CURRENT, current); - builder.endObject(); - return builder; - } -} diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java b/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java index 926ff482248..bd01e7f0183 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java @@ -30,10 +30,9 @@ import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.ShardCoreKeyMap; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Scope; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.cache.query.QueryCacheStats; @@ -50,9 +49,9 @@ import 
java.util.concurrent.ConcurrentHashMap; public class IndicesQueryCache extends AbstractComponent implements QueryCache, Closeable { public static final Setting INDICES_CACHE_QUERY_SIZE_SETTING = Setting.byteSizeSetting( - "indices.queries.cache.size", "10%", false, Scope.CLUSTER); + "indices.queries.cache.size", "10%", Property.NodeScope); public static final Setting INDICES_CACHE_QUERY_COUNT_SETTING = Setting.intSetting( - "indices.queries.cache.count", 10000, 1, false, Scope.CLUSTER); + "indices.queries.cache.count", 10000, 1, Property.NodeScope); private final LRUQueryCache cache; private final ShardCoreKeyMap shardKeyMap = new ShardCoreKeyMap(); diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java b/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java index 32b5f55b369..4b4aa4e8df2 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.cache.RemovalNotification; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -68,12 +69,12 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo * A setting to enable or disable request caching on an index level. Its dynamic by default * since we are checking on the cluster state IndexMetaData always. */ - public static final Setting INDEX_CACHE_REQUEST_ENABLED_SETTING = Setting.boolSetting("index.requests.cache.enable", - false, true, Setting.Scope.INDEX); - public static final Setting INDICES_CACHE_QUERY_SIZE = Setting.byteSizeSetting("indices.requests.cache.size", "1%", - false, Setting.Scope.CLUSTER); - public static final Setting INDICES_CACHE_QUERY_EXPIRE = Setting.positiveTimeSetting("indices.requests.cache.expire", - new TimeValue(0), false, Setting.Scope.CLUSTER); + public static final Setting INDEX_CACHE_REQUEST_ENABLED_SETTING = + Setting.boolSetting("index.requests.cache.enable", true, Property.Dynamic, Property.IndexScope); + public static final Setting INDICES_CACHE_QUERY_SIZE = + Setting.byteSizeSetting("indices.requests.cache.size", "1%", Property.NodeScope); + public static final Setting INDICES_CACHE_QUERY_EXPIRE = + Setting.positiveTimeSetting("indices.requests.cache.expire", new TimeValue(0), Property.NodeScope); private final ConcurrentMap registeredClosedListeners = ConcurrentCollections.newConcurrentMap(); private final Set keysToClean = ConcurrentCollections.newConcurrentSet(); diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index 440a11a1904..2719075a5c7 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -19,6 +19,7 @@ package org.elasticsearch.indices; +import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.CollectionUtil; @@ -30,10 +31,11 @@ import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; import 
org.elasticsearch.action.admin.indices.stats.IndexShardStats; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesReference; @@ -47,6 +49,7 @@ import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -61,10 +64,10 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.cache.request.ShardRequestCache; -import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.get.GetStats; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.refresh.RefreshStats; @@ -73,6 +76,7 @@ import org.elasticsearch.index.shard.IllegalIndexShardStateException; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; +import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.IndexStoreConfig; @@ -87,12 +91,13 @@ import org.elasticsearch.search.query.QueryPhase; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.threadpool.ThreadPool; +import java.io.Closeable; import java.io.IOException; import java.nio.file.Files; import java.util.ArrayList; +import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -103,6 +108,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Predicate; +import java.util.stream.Collectors; import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; @@ -115,7 +121,8 @@ import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList; public class IndicesService extends AbstractLifecycleComponent implements Iterable, IndexService.ShardStoreDeleter { public static final String INDICES_SHARDS_CLOSED_TIMEOUT = "indices.shards_closed_timeout"; - public static final Setting INDICES_CACHE_CLEAN_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.cache.cleanup_interval", TimeValue.timeValueMinutes(1), false, Setting.Scope.CLUSTER); + 
public static final Setting INDICES_CACHE_CLEAN_INTERVAL_SETTING = + Setting.positiveTimeSetting("indices.cache.cleanup_interval", TimeValue.timeValueMinutes(1), Property.NodeScope); private final PluginsService pluginsService; private final NodeEnvironment nodeEnv; private final TimeValue shardsClosedTimeout; @@ -169,7 +176,7 @@ public class IndicesService extends AbstractLifecycleComponent i this.circuitBreakerService = circuitBreakerService; this.indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() { @Override - public void onRemoval(ShardId shardId, String fieldName, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes) { + public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) { assert sizeInBytes >= 0 : "When reducing circuit breaker, it should be adjusted with a number higher or equal to 0 and not [" + sizeInBytes + "]"; circuitBreakerService.getBreaker(CircuitBreaker.FIELDDATA).addWithoutBreaking(-sizeInBytes); } @@ -185,14 +192,14 @@ public class IndicesService extends AbstractLifecycleComponent i ExecutorService indicesStopExecutor = Executors.newFixedThreadPool(5, EsExecutors.daemonThreadFactory("indices_shutdown")); // Copy indices because we modify it asynchronously in the body of the loop - Set indices = new HashSet<>(this.indices.keySet()); + final Set indices = this.indices.values().stream().map(s -> s.index()).collect(Collectors.toSet()); final CountDownLatch latch = new CountDownLatch(indices.size()); - for (final String index : indices) { + for (final Index index : indices) { indicesStopExecutor.execute(() -> { try { removeIndex(index, "shutdown", false); } catch (Throwable e) { - logger.warn("failed to remove index on stop [" + index + "]", e); + logger.warn("failed to remove index on stop [{}]", e, index); } finally { latch.countDown(); } @@ -256,13 +263,13 @@ public class IndicesService extends AbstractLifecycleComponent i } Map> statsByShard = new HashMap<>(); - for (IndexService indexService : indices.values()) { + for (IndexService indexService : this) { for (IndexShard indexShard : indexService) { try { if (indexShard.routingEntry() == null) { continue; } - IndexShardStats indexShardStats = new IndexShardStats(indexShard.shardId(), new ShardStats[] { new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesQueryCache, indexShard, flags), indexShard.commitStats()) }); + IndexShardStats indexShardStats = new IndexShardStats(indexShard.shardId(), new ShardStats[] { new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesQueryCache, indexService.cache().getPercolatorQueryCache(), indexShard, flags), indexShard.commitStats()) }); if (!statsByShard.containsKey(indexService.index())) { statsByShard.put(indexService.index(), arrayAsArrayList(indexShardStats)); } else { @@ -290,17 +297,8 @@ public class IndicesService extends AbstractLifecycleComponent i return indices.values().iterator(); } - public boolean hasIndex(String index) { - return indices.containsKey(index); - } - - /** - * Returns an IndexService for the specified index if exists otherwise returns null. 
- * - */ - @Nullable - public IndexService indexService(String index) { - return indices.get(index); + public boolean hasIndex(Index index) { + return indices.containsKey(index.getUUID()); } /** @@ -309,33 +307,21 @@ */ @Nullable public IndexService indexService(Index index) { - return indexService(index.getName()); - } - - /** - * Returns an IndexService for the specified index if exists otherwise a {@link IndexNotFoundException} is thrown. - */ - public IndexService indexServiceSafe(String index) { - IndexService indexService = indexService(index); - if (indexService == null) { - throw new IndexNotFoundException(index); - } - return indexService; + return indices.get(index.getUUID()); } /** * Returns an IndexService for the specified index if it exists, otherwise a {@link IndexNotFoundException} is thrown. */ public IndexService indexServiceSafe(Index index) { - IndexService indexService = indexServiceSafe(index.getName()); - if (indexService.indexUUID().equals(index.getUUID()) == false) { + IndexService indexService = indices.get(index.getUUID()); + if (indexService == null) { throw new IndexNotFoundException(index); } + assert indexService.indexUUID().equals(index.getUUID()) : "uuid mismatch local: " + indexService.indexUUID() + " incoming: " + index.getUUID(); return indexService; } - - /** * Creates a new {@link IndexService} for the given metadata. * @param indexMetaData the index metadata to create the index for @@ -343,42 +329,31 @@ * @throws IndexAlreadyExistsException if the index already exists. */ public synchronized IndexService createIndex(final NodeServicesProvider nodeServicesProvider, IndexMetaData indexMetaData, List builtInListeners) throws IOException { + if (!lifecycle.started()) { throw new IllegalStateException("Can't create an index [" + indexMetaData.getIndex() + "], node is closed"); } + if (indexMetaData.getIndexUUID().equals(IndexMetaData.INDEX_UUID_NA_VALUE)) { + throw new IllegalArgumentException("index must have a real UUID; found value: [" + indexMetaData.getIndexUUID() + "]"); + } final Index index = indexMetaData.getIndex(); - final Predicate indexNameMatcher = (indexExpression) -> indexNameExpressionResolver.matchesIndex(index.getName(), indexExpression, clusterService.state()); - final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexNameMatcher, indexScopeSetting); - if (indices.containsKey(index.getName())) { + if (hasIndex(index)) { throw new IndexAlreadyExistsException(index); } - logger.debug("creating Index [{}], shards [{}]/[{}{}]", - indexMetaData.getIndex(), - idxSettings.getNumberOfShards(), - idxSettings.getNumberOfReplicas(), - idxSettings.isShadowReplicaIndex() ? 
"s" : ""); - - final IndexModule indexModule = new IndexModule(idxSettings, indexStoreConfig, analysisRegistry); - pluginsService.onIndexModule(indexModule); - for (IndexEventListener listener : builtInListeners) { - indexModule.addIndexEventListener(listener); - } + List finalListeners = new ArrayList<>(builtInListeners); final IndexEventListener onStoreClose = new IndexEventListener() { @Override public void onStoreClosed(ShardId shardId) { indicesQueryCache.onClose(shardId); } }; - indexModule.addIndexEventListener(onStoreClose); - indexModule.addIndexEventListener(oldShardsStats); - final IndexEventListener listener = indexModule.freeze(); - listener.beforeIndexCreated(index, idxSettings.getSettings()); - final IndexService indexService = indexModule.newIndexService(nodeEnv, this, nodeServicesProvider, indicesQueryCache, mapperRegistry, indicesFieldDataCache, indexingMemoryController); + finalListeners.add(onStoreClose); + finalListeners.add(oldShardsStats); + final IndexService indexService = createIndexService("create index", nodeServicesProvider, indexMetaData, indicesQueryCache, indicesFieldDataCache, finalListeners, indexingMemoryController); boolean success = false; try { - assert indexService.getIndexEventListener() == listener; - listener.afterIndexCreated(indexService); - indices = newMapBuilder(indices).put(index.getName(), indexService).immutableMap(); + indexService.getIndexEventListener().afterIndexCreated(indexService); + indices = newMapBuilder(indices).put(index.getUUID(), indexService).immutableMap(); success = true; return indexService; } finally { @@ -386,7 +361,54 @@ public class IndicesService extends AbstractLifecycleComponent i indexService.close("plugins_failed", true); } } + } + /** + * This creates a new IndexService without registering it + */ + private synchronized IndexService createIndexService(final String reason, final NodeServicesProvider nodeServicesProvider, IndexMetaData indexMetaData, IndicesQueryCache indicesQueryCache, IndicesFieldDataCache indicesFieldDataCache, List builtInListeners, IndexingOperationListener... indexingOperationListeners) throws IOException { + final Index index = indexMetaData.getIndex(); + final Predicate indexNameMatcher = (indexExpression) -> indexNameExpressionResolver.matchesIndex(index.getName(), indexExpression, clusterService.state()); + final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexNameMatcher, indexScopeSetting); + logger.debug("creating Index [{}], shards [{}]/[{}{}] - reason [{}]", + indexMetaData.getIndex(), + idxSettings.getNumberOfShards(), + idxSettings.getNumberOfReplicas(), + idxSettings.isShadowReplicaIndex() ? "s" : "", reason); + + final IndexModule indexModule = new IndexModule(idxSettings, indexStoreConfig, analysisRegistry); + pluginsService.onIndexModule(indexModule); + for (IndexEventListener listener : builtInListeners) { + indexModule.addIndexEventListener(listener); + } + final IndexEventListener listener = indexModule.freeze(); + listener.beforeIndexCreated(index, idxSettings.getSettings()); + return indexModule.newIndexService(nodeEnv, this, nodeServicesProvider, indicesQueryCache, mapperRegistry, indicesFieldDataCache, indexingOperationListeners); + } + + /** + * This method verifies that the given {@link IndexMetaData} holds sane values to create an {@link IndexService}. This method will throw an + * exception if the creation fails. The created {@link IndexService} will not be registered and will be closed immediately. 
+ */ + public synchronized void verifyIndexMetadata(final NodeServicesProvider nodeServicesProvider, IndexMetaData metaData) throws IOException { + final List closeables = new ArrayList<>(); + try { + IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() {}); + closeables.add(indicesFieldDataCache); + IndicesQueryCache indicesQueryCache = new IndicesQueryCache(settings); + closeables.add(indicesQueryCache); + // this will also fail if some plugin fails etc. which is nice since we can verify that early + final IndexService service = createIndexService("metadata verification", nodeServicesProvider, + metaData, indicesQueryCache, indicesFieldDataCache, Collections.emptyList()); + for (ObjectCursor typeMapping : metaData.getMappings().values()) { + // don't apply the default mapping, it has been applied when the mapping was created + service.mapperService().merge(typeMapping.value.type(), typeMapping.value.source(), + MapperService.MergeReason.MAPPING_RECOVERY, true); + } + closeables.add(() -> service.close("metadata verification", false)); + } finally { + IOUtils.close(closeables); + } } /** @@ -395,22 +417,24 @@ public class IndicesService extends AbstractLifecycleComponent i * @param index the index to remove * @param reason the high level reason causing this removal */ - public void removeIndex(String index, String reason) { + public void removeIndex(Index index, String reason) { removeIndex(index, reason, false); } - private void removeIndex(String index, String reason, boolean delete) { + private void removeIndex(Index index, String reason, boolean delete) { + final String indexName = index.getName(); try { final IndexService indexService; final IndexEventListener listener; synchronized (this) { - if (indices.containsKey(index) == false) { + if (hasIndex(index) == false) { return; } - logger.debug("[{}] closing ... (reason [{}])", index, reason); + logger.debug("[{}] closing ... (reason [{}])", indexName, reason); Map newIndices = new HashMap<>(indices); - indexService = newIndices.remove(index); + indexService = newIndices.remove(index.getUUID()); + assert indexService != null : "IndexService is null for index: " + index; indices = unmodifiableMap(newIndices); listener = indexService.getIndexEventListener(); } @@ -419,9 +443,9 @@ public class IndicesService extends AbstractLifecycleComponent i if (delete) { listener.beforeIndexDeleted(indexService); } - logger.debug("[{}] closing index service (reason [{}])", index, reason); + logger.debug("{} closing index service (reason [{}])", index, reason); indexService.close(reason, delete); - logger.debug("[{}] closed... (reason [{}])", index, reason); + logger.debug("{} closed... (reason [{}])", index, reason); listener.afterIndexClosed(indexService.index(), indexService.getIndexSettings().getSettings()); if (delete) { final IndexSettings indexSettings = indexService.getIndexSettings(); @@ -474,12 +498,12 @@ public class IndicesService extends AbstractLifecycleComponent i * Deletes the given index. Persistent parts of the index * like the shards files, state and transaction logs are removed once all resources are released. * - * Equivalent to {@link #removeIndex(String, String)} but fires + * Equivalent to {@link #removeIndex(Index, String)} but fires * different lifecycle events to ensure pending resources of this index are immediately removed. 
* @param index the index to delete * @param reason the high level reason causing this delete */ - public void deleteIndex(String index, String reason) throws IOException { + public void deleteIndex(Index index, String reason) throws IOException { removeIndex(index, reason, true); } @@ -505,16 +529,17 @@ public void deleteIndexStore(String reason, IndexMetaData metaData, ClusterState clusterState, boolean closed) throws IOException { if (nodeEnv.hasNodeFile()) { synchronized (this) { - String indexName = metaData.getIndex().getName(); - if (indices.containsKey(indexName)) { - String localUUid = indices.get(indexName).indexUUID(); - throw new IllegalStateException("Can't delete index store for [" + indexName + "] - it's still part of the indices service [" + localUUid + "] [" + metaData.getIndexUUID() + "]"); + Index index = metaData.getIndex(); + if (hasIndex(index)) { + String localUUid = indexService(index).indexUUID(); + throw new IllegalStateException("Can't delete index store for [" + index.getName() + "] - it's still part of the indices service [" + localUUid + "] [" + metaData.getIndexUUID() + "]"); } - if (clusterState.metaData().hasIndex(indexName) && (clusterState.nodes().localNode().masterNode() == true)) { + + if (clusterState.metaData().hasIndex(index.getName()) && (clusterState.nodes().localNode().masterNode() == true)) { // we do not delete the store if it is a master eligible node and the index is still in the cluster state // because we want to keep the meta data for indices around even if no shards are left here - final IndexMetaData index = clusterState.metaData().index(indexName); - throw new IllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]"); + final IndexMetaData idxMeta = clusterState.metaData().index(index.getName()); + throw new IllegalStateException("Can't delete closed index store for [" + index.getName() + "] - it's still part of the cluster state [" + idxMeta.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]"); } } final IndexSettings indexSettings = buildIndexSettings(metaData); @@ -543,7 +568,7 @@ } // this is a pure protection to make sure this index doesn't get re-imported as a dangling index. // we should in the future write a tombstone rather than wiping the metadata. - MetaDataStateFormat.deleteMetaState(nodeEnv.indexPaths(index.getName())); + MetaDataStateFormat.deleteMetaState(nodeEnv.indexPaths(index)); } } @@ -554,6 +579,7 @@ * @param indexSettings the shard's index settings. * @throws IOException if an IOException occurs */ + @Override public void deleteShardStore(String reason, ShardLock lock, IndexSettings indexSettings) throws IOException { ShardId shardId = lock.getShardId(); logger.trace("{} deleting shard reason [{}]", shardId, reason); @@ -607,7 +633,7 @@ * @return true if the index can be deleted on this node */ public boolean canDeleteIndexContents(Index index, IndexSettings indexSettings, boolean closed) { - final IndexService indexService = this.indices.get(index.getName()); + final IndexService indexService = indexService(index); // Closed indices may be deleted, even if they are on a shared // filesystem.
Since it is closed we aren't deleting it for relocation if (indexSettings.isOnSharedFilesystem() == false || closed) { @@ -634,14 +660,19 @@ */ public boolean canDeleteShardContent(ShardId shardId, IndexSettings indexSettings) { assert shardId.getIndex().equals(indexSettings.getIndex()); - final IndexService indexService = this.indices.get(shardId.getIndexName()); + final IndexService indexService = indexService(shardId.getIndex()); if (indexSettings.isOnSharedFilesystem() == false) { - if (indexService != null && nodeEnv.hasNodeFile()) { - return indexService.hasShard(shardId.id()) == false; - } else if (nodeEnv.hasNodeFile()) { - if (indexSettings.hasCustomDataPath()) { + if (nodeEnv.hasNodeFile()) { + final boolean isAllocated = indexService != null && indexService.hasShard(shardId.id()); + if (isAllocated) { + return false; // we are allocated - can't delete the shard + } else if (indexSettings.hasCustomDataPath()) { + // let's see if it's on a custom path (return false if the shard doesn't exist) + // we don't need to delete anything that is not there return Files.exists(nodeEnv.resolveCustomLocation(indexSettings, shardId)); } else { + // let's see if its path is available (return false if the shard doesn't exist) + // we don't need to delete anything that is not there return FileSystemUtils.exists(nodeEnv.availableShardPaths(shardId)); } } @@ -661,6 +692,7 @@ /** * Adds a pending delete for the given index shard. */ + @Override public void addPendingDelete(ShardId shardId, IndexSettings settings) { if (shardId == null) { throw new IllegalArgumentException("shardId must not be null"); diff --git a/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java b/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java index 7d24d4fa897..237975f2899 100644 --- a/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java +++ b/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java @@ -38,13 +38,12 @@ import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.get.GetStats; import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.merge.MergeStats; -import org.elasticsearch.index.percolator.PercolateStats; +import org.elasticsearch.index.percolator.PercolatorQueryCacheStats; import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.search.stats.SearchStats; import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.index.store.StoreStats; -import org.elasticsearch.index.suggest.stats.SuggestStats; import org.elasticsearch.search.suggest.completion.CompletionStats; import java.io.IOException; @@ -105,8 +104,8 @@ public class NodeIndicesStats implements Streamable, ToXContent { } @Nullable - public PercolateStats getPercolate() { - return stats.getPercolate(); + public PercolatorQueryCacheStats getPercolate() { + return stats.getPercolatorCache(); } @Nullable @@ -149,11 +148,6 @@ public class NodeIndicesStats implements Streamable, ToXContent { return stats.getSegments(); } - @Nullable - public SuggestStats getSuggest() { - return stats.getSuggest(); - } - @Nullable public RecoveryStats getRecoveryStats() { return stats.getRecoveryStats(); @@ -172,7 +166,7 @@ public class NodeIndicesStats implements Streamable, ToXContent { int entries = in.readVInt(); statsByShard = new HashMap<>(); for
(int i = 0; i < entries; i++) { - Index index = Index.readIndex(in); + Index index = new Index(in); int indexShardListSize = in.readVInt(); List indexShardStats = new ArrayList<>(indexShardListSize); for (int j = 0; j < indexShardListSize; j++) { diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java b/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java index e73396fcd7f..20a1d341cf9 100644 --- a/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java +++ b/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java @@ -160,15 +160,21 @@ public final class AnalysisModule extends AbstractModule { @Override protected void configure() { try { - HunspellService service = new HunspellService(environment.settings(), environment, knownDictionaries); - AnalysisRegistry registry = new AnalysisRegistry(service, environment, charFilters, tokenFilters, tokenizers, analyzers); - bind(HunspellService.class).toInstance(service); + AnalysisRegistry registry = buildRegistry(); + bind(HunspellService.class).toInstance(registry.getHunspellService()); bind(AnalysisRegistry.class).toInstance(registry); } catch (IOException e) { throw new ElasticsearchException("failed to load hunspell service", e); } } + /** + * Builds an {@link AnalysisRegistry} from the current configuration. + */ + public AnalysisRegistry buildRegistry() throws IOException { + return new AnalysisRegistry(new HunspellService(environment.settings(), environment, knownDictionaries), environment, charFilters, tokenFilters, tokenizers, analyzers); + } + /** * AnalysisProvider is the basic factory interface for registering analysis components like: *

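A recurring, purely mechanical change in the hunks below (HunspellService, HierarchyCircuitBreakerService, RecoverySettings, IndicesStore, IndicesTTLService) is the migration of Setting registration from a boolean dynamic flag plus a Setting.Scope enum to varargs Setting.Property flags. A minimal before/after sketch follows; the two constants and their arguments are copied from this diff, while the wrapper class and its name are illustrative only:

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.unit.TimeValue;

// Hypothetical holder class, for illustration only; not part of the patch.
class SettingMigrationSketch {
    // Before this patch, registration took a boolean "dynamic" flag and a scope enum:
    //   Setting.boolSetting("indices.analysis.hunspell.dictionary.lazy", Boolean.FALSE, false, Setting.Scope.CLUSTER)
    //   Setting.positiveTimeSetting("indices.ttl.interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.CLUSTER)

    // After this patch, Property.NodeScope replaces Setting.Scope.CLUSTER, and
    // Property.Dynamic replaces the boolean flag (it is simply omitted for static settings):
    static final Setting<Boolean> HUNSPELL_LAZY_LOAD =
        Setting.boolSetting("indices.analysis.hunspell.dictionary.lazy", Boolean.FALSE, Property.NodeScope);
    static final Setting<TimeValue> INDICES_TTL_INTERVAL_SETTING =
        Setting.positiveTimeSetting("indices.ttl.interval", TimeValue.timeValueSeconds(60),
            Property.Dynamic, Property.NodeScope);
}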
    diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java b/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java index 5d2fb761842..75c15f09778 100644 --- a/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java +++ b/core/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java @@ -26,6 +26,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -73,9 +74,12 @@ import java.util.function.Function; */ public class HunspellService extends AbstractComponent { - public final static Setting HUNSPELL_LAZY_LOAD = Setting.boolSetting("indices.analysis.hunspell.dictionary.lazy", Boolean.FALSE, false, Setting.Scope.CLUSTER); - public final static Setting HUNSPELL_IGNORE_CASE = Setting.boolSetting("indices.analysis.hunspell.dictionary.ignore_case", Boolean.FALSE, false, Setting.Scope.CLUSTER); - public final static Setting HUNSPELL_DICTIONARY_OPTIONS = Setting.groupSetting("indices.analysis.hunspell.dictionary.", false, Setting.Scope.CLUSTER); + public final static Setting HUNSPELL_LAZY_LOAD = + Setting.boolSetting("indices.analysis.hunspell.dictionary.lazy", Boolean.FALSE, Property.NodeScope); + public final static Setting HUNSPELL_IGNORE_CASE = + Setting.boolSetting("indices.analysis.hunspell.dictionary.ignore_case", Boolean.FALSE, Property.NodeScope); + public final static Setting HUNSPELL_DICTIONARY_OPTIONS = + Setting.groupSetting("indices.analysis.hunspell.dictionary.", Property.NodeScope); private final ConcurrentHashMap dictionaries = new ConcurrentHashMap<>(); private final Map knownDictionaries; private final boolean defaultIgnoreCase; diff --git a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index 0e1532bc6b3..d2d96092186 100644 --- a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -46,15 +47,22 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { private final ConcurrentMap breakers = new ConcurrentHashMap(); - public static final Setting TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.total.limit", "70%", true, Setting.Scope.CLUSTER); + public static final Setting TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING = + Setting.byteSizeSetting("indices.breaker.total.limit", "70%", Property.Dynamic, Property.NodeScope); - public static final Setting FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.fielddata.limit", "60%", true, Setting.Scope.CLUSTER); - public static final Setting FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING = 
Setting.doubleSetting("indices.breaker.fielddata.overhead", 1.03d, 0.0d, true, Setting.Scope.CLUSTER); - public static final Setting FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING = new Setting<>("indices.breaker.fielddata.type", "memory", CircuitBreaker.Type::parseValue, false, Setting.Scope.CLUSTER); + public static final Setting FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING = + Setting.byteSizeSetting("indices.breaker.fielddata.limit", "60%", Property.Dynamic, Property.NodeScope); + public static final Setting FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING = + Setting.doubleSetting("indices.breaker.fielddata.overhead", 1.03d, 0.0d, Property.Dynamic, Property.NodeScope); + public static final Setting FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING = + new Setting<>("indices.breaker.fielddata.type", "memory", CircuitBreaker.Type::parseValue, Property.NodeScope); - public static final Setting REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.request.limit", "40%", true, Setting.Scope.CLUSTER); - public static final Setting REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.doubleSetting("indices.breaker.request.overhead", 1.0d, 0.0d, true, Setting.Scope.CLUSTER); - public static final Setting REQUEST_CIRCUIT_BREAKER_TYPE_SETTING = new Setting<>("indices.breaker.request.type", "memory", CircuitBreaker.Type::parseValue, false, Setting.Scope.CLUSTER); + public static final Setting REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING = + Setting.byteSizeSetting("indices.breaker.request.limit", "40%", Property.Dynamic, Property.NodeScope); + public static final Setting REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING = + Setting.doubleSetting("indices.breaker.request.overhead", 1.0d, 0.0d, Property.Dynamic, Property.NodeScope); + public static final Setting REQUEST_CIRCUIT_BREAKER_TYPE_SETTING = + new Setting<>("indices.breaker.request.type", "memory", CircuitBreaker.Type::parseValue, Property.NodeScope); diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 7998afb7656..82f1466bf13 100644 --- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -19,10 +19,8 @@ package org.elasticsearch.indices.cluster; -import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction; @@ -35,9 +33,11 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RestoreSource; +import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.compress.CompressedXContent; @@ -46,6 +46,7 @@ import org.elasticsearch.common.lucene.Lucene; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Callback; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexShardAlreadyExistsException; @@ -70,9 +71,11 @@ import org.elasticsearch.snapshots.RestoreService; import org.elasticsearch.threadpool.ThreadPool; import java.util.Arrays; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.ConcurrentMap; /** @@ -89,7 +92,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent> iterator = failedShards.entrySet().iterator(); iterator.hasNext(); ) { + Map.Entry entry = iterator.next(); + ShardId failedShardId = entry.getKey(); + ShardRouting failedShardRouting = entry.getValue(); + IndexRoutingTable indexRoutingTable = routingTable.index(failedShardId.getIndex()); + if (indexRoutingTable == null) { + iterator.remove(); + continue; + } + IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(failedShardId.id()); + if (shardRoutingTable == null) { + iterator.remove(); + continue; + } + if (shardRoutingTable.assignedShards().stream().noneMatch(shr -> shr.isSameAllocation(failedShardRouting))) { + iterator.remove(); } } } @@ -217,17 +224,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent newShardAllocationIds = new HashSet<>(); for (IndexService indexService : indicesService) { - String indexName = indexService.index().getName(); - IndexMetaData indexMetaData = event.state().metaData().index(indexName); - if (indexMetaData == null) { - continue; - } + Index index = indexService.index(); + IndexMetaData indexMetaData = event.state().metaData().index(index); + assert indexMetaData != null : "local index doesn't have metadata, should have been cleaned up by applyDeletedIndices: " + index; // now, go over and delete shards that need to get deleted - newShardIds.clear(); + newShardAllocationIds.clear(); for (ShardRouting shard : routingNode) { - if (shard.index().getName().equals(indexName)) { - newShardIds.add(shard.id()); + if (shard.index().equals(index)) { + // use the allocation id and not object so we won't be influenced by relocation targets + newShardAllocationIds.add(shard.allocationId().getId()); } } - for (Integer existingShardId : indexService.shardIds()) { - if (!newShardIds.contains(existingShardId)) { + for (IndexShard existingShard : indexService) { + if (newShardAllocationIds.contains(existingShard.routingEntry().allocationId().getId()) == false) { if (indexMetaData.getState() == IndexMetaData.State.CLOSE) { if (logger.isDebugEnabled()) { - logger.debug("[{}][{}] removing shard (index is closed)", indexName, existingShardId); + logger.debug("{} removing shard (index is closed)", existingShard.shardId()); } - indexService.removeShard(existingShardId, "removing shard (index is closed)"); + indexService.removeShard(existingShard.shardId().id(), "removing shard (index is closed)"); } else { // we can just remove the shard, without cleaning it locally, since we will clean it // when all shards are allocated in the IndicesStore if (logger.isDebugEnabled()) { - logger.debug("[{}][{}] removing shard (not allocated)", indexName, existingShardId); + logger.debug("{} removing shard (not allocated)", existingShard.shardId()); } - indexService.removeShard(existingShardId, "removing
shard (not allocated)"); + indexService.removeShard(existingShard.shardId().id(), "removing shard (not allocated)"); } } } } } + private void applyCleanedIndices(final ClusterChangedEvent event) { + // handle closed indices, since they are not allocated on a node once they are closed + // so applyDeletedIndices might not take them into account + for (IndexService indexService : indicesService) { + Index index = indexService.index(); + IndexMetaData indexMetaData = event.state().metaData().index(index); + if (indexMetaData != null && indexMetaData.getState() == IndexMetaData.State.CLOSE) { + for (Integer shardId : indexService.shardIds()) { + logger.debug("{}[{}] removing shard (index is closed)", index, shardId); + try { + indexService.removeShard(shardId, "removing shard (index is closed)"); + } catch (Throwable e) { + logger.warn("{} failed to remove shard (index is closed)", e, index); + } + } + } + } + + final Set hasAllocations = new HashSet<>(); + final RoutingNode node = event.state().getRoutingNodes().node(event.state().nodes().localNodeId()); + // if no shards are allocated ie. if this node is a master-only node it can return nul + if (node != null) { + for (ShardRouting routing : node) { + hasAllocations.add(routing.index()); + } + } + for (IndexService indexService : indicesService) { + Index index = indexService.index(); + if (hasAllocations.contains(index) == false) { + assert indexService.shardIds().isEmpty() : + "no locally assigned shards, but index wasn't emptied by applyDeletedShards." + + " index " + index + ", shards: " + indexService.shardIds(); + if (logger.isDebugEnabled()) { + logger.debug("{} cleaning index (no shards allocated)", index); + } + // clean the index + removeIndex(index, "removing index (no shards allocated)"); + } + } + } + + private void applyIndexMetaData(ClusterChangedEvent event) { + if (!event.metaDataChanged()) { + return; + } + for (IndexMetaData indexMetaData : event.state().metaData()) { + if (!indicesService.hasIndex(indexMetaData.getIndex())) { + // we only create / update here + continue; + } + // if the index meta data didn't change, no need check for refreshed settings + if (!event.indexMetaDataChanged(indexMetaData)) { + continue; + } + Index index = indexMetaData.getIndex(); + IndexService indexService = indicesService.indexService(index); + if (indexService == null) { + // already deleted on us, ignore it + continue; + } + indexService.updateMetaData(indexMetaData); + } + } + private void applyNewIndices(final ClusterChangedEvent event) { // we only create indices for shards that are allocated RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().localNodeId()); @@ -298,8 +367,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent !status.sourceNode().equals(sourceNode))) { @@ -477,7 +519,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent> iterator = failedShards.entrySet().iterator(); iterator.hasNext(); ) { - Map.Entry entry = iterator.next(); - ShardId failedShardId = entry.getKey(); - ShardRouting failedShardRouting = entry.getValue(); - IndexRoutingTable indexRoutingTable = routingTable.index(failedShardId.getIndex()); - if (indexRoutingTable == null) { - iterator.remove(); - continue; - } - IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(failedShardId.id()); - if (shardRoutingTable == null) { - iterator.remove(); - continue; - } - if (shardRoutingTable.assignedShards().stream().noneMatch(shr -> 
shr.isSameAllocation(failedShardRouting))) { - iterator.remove(); - } - } - } - - private void applyInitializingShard(final ClusterState state, final IndexMetaData indexMetaData, final ShardRouting shardRouting) { - final IndexService indexService = indicesService.indexService(shardRouting.index()); - if (indexService == null) { - // got deleted on us, ignore - return; - } + private void applyInitializingShard(final ClusterState state, final IndexMetaData indexMetaData, IndexService indexService, final ShardRouting shardRouting) { final RoutingTable routingTable = state.routingTable(); final DiscoveryNodes nodes = state.getNodes(); final int shardId = shardRouting.id(); @@ -537,7 +547,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent { try { @@ -634,7 +644,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent { final ShardId sId = indexShard.shardId(); @@ -727,7 +737,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent { @Override public void handle(final IndexShard.ShardFailure shardFailure) { - final IndexService indexService = indicesService.indexService(shardFailure.routing.shardId().getIndex().getName()); + final IndexService indexService = indicesService.indexService(shardFailure.routing.shardId().getIndex()); final ShardRouting shardRouting = shardFailure.routing; threadPool.generic().execute(() -> { synchronized (mutex) { diff --git a/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java b/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java index 0a3f063dfcc..4f7d482f8a9 100644 --- a/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java +++ b/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java @@ -34,11 +34,11 @@ import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.fielddata.AtomicFieldData; -import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.shard.ShardId; @@ -52,7 +52,8 @@ import java.util.function.ToLongBiFunction; */ public class IndicesFieldDataCache extends AbstractComponent implements RemovalListener, Releasable{ - public static final Setting INDICES_FIELDDATA_CACHE_SIZE_KEY = Setting.byteSizeSetting("indices.fielddata.cache.size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER); + public static final Setting INDICES_FIELDDATA_CACHE_SIZE_KEY = + Setting.byteSizeSetting("indices.fielddata.cache.size", new ByteSizeValue(-1), Property.NodeScope); private final IndexFieldDataCache.Listener indicesFieldDataCacheListener; private final Cache cache; @@ -73,8 +74,8 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL cache.invalidateAll(); } - public IndexFieldDataCache buildIndexFieldDataCache(IndexFieldDataCache.Listener listener, Index index, String fieldName, FieldDataType fieldDataType) { - return new IndexFieldCache(logger, cache, index, fieldName, fieldDataType, 
indicesFieldDataCacheListener, listener); + public IndexFieldDataCache buildIndexFieldDataCache(IndexFieldDataCache.Listener listener, Index index, String fieldName) { + return new IndexFieldCache(logger, cache, index, fieldName, indicesFieldDataCacheListener, listener); } public Cache getCache() { @@ -89,7 +90,7 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL final Accountable value = notification.getValue(); for (IndexFieldDataCache.Listener listener : key.listeners) { try { - listener.onRemoval(key.shardId, indexCache.fieldName, indexCache.fieldDataType, notification.getRemovalReason() == RemovalNotification.RemovalReason.EVICTED, value.ramBytesUsed()); + listener.onRemoval(key.shardId, indexCache.fieldName, notification.getRemovalReason() == RemovalNotification.RemovalReason.EVICTED, value.ramBytesUsed()); } catch (Throwable e) { // load anyway since listeners should not throw exceptions logger.error("Failed to call listener on field data cache unloading", e); @@ -112,16 +113,14 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL private final ESLogger logger; final Index index; final String fieldName; - final FieldDataType fieldDataType; private final Cache cache; private final Listener[] listeners; - IndexFieldCache(ESLogger logger,final Cache cache, Index index, String fieldName, FieldDataType fieldDataType, Listener... listeners) { + IndexFieldCache(ESLogger logger,final Cache cache, Index index, String fieldName, Listener... listeners) { this.logger = logger; this.listeners = listeners; this.index = index; this.fieldName = fieldName; - this.fieldDataType = fieldDataType; this.cache = cache; } @@ -138,7 +137,7 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL final AtomicFieldData fieldData = indexFieldData.loadDirect(context); for (Listener listener : k.listeners) { try { - listener.onCache(shardId, fieldName, fieldDataType, fieldData); + listener.onCache(shardId, fieldName, fieldData); } catch (Throwable e) { // load anyway since listeners should not throw exceptions logger.error("Failed to call listener on atomic field data loading", e); @@ -162,7 +161,7 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL final Accountable ifd = (Accountable) indexFieldData.localGlobalDirect(indexReader); for (Listener listener : k.listeners) { try { - listener.onCache(shardId, fieldName, fieldDataType, ifd); + listener.onCache(shardId, fieldName, ifd); } catch (Throwable e) { // load anyway since listeners should not throw exceptions logger.error("Failed to call listener on global ordinals loading", e); diff --git a/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCacheListener.java b/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCacheListener.java index c37cf6def79..1995bb2dfb8 100644 --- a/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCacheListener.java +++ b/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCacheListener.java @@ -22,7 +22,6 @@ package org.elasticsearch.indices.fielddata.cache; import org.apache.lucene.util.Accountable; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.shard.ShardId; import 
org.elasticsearch.indices.breaker.CircuitBreakerService; @@ -43,11 +42,11 @@ public class IndicesFieldDataCacheListener implements IndexFieldDataCache.Listen } @Override - public void onCache(ShardId shardId, String fieldName, FieldDataType fieldDataType, Accountable fieldData) { + public void onCache(ShardId shardId, String fieldName, Accountable fieldData) { } @Override - public void onRemoval(ShardId shardId, String fieldName, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes) { + public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) { assert sizeInBytes >= 0 : "When reducing circuit breaker, it should be adjusted with a number higher or equal to 0 and not [" + sizeInBytes + "]"; circuitBreakerService.getBreaker(CircuitBreaker.FIELDDATA).addWithoutBreaking(-sizeInBytes); } diff --git a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index 6eb7c88a2a4..b1d7af7ff9c 100644 --- a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -23,7 +23,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -31,6 +30,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -39,6 +39,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; @@ -111,15 +112,15 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL */ public void attemptSyncedFlush(final String[] aliasesOrIndices, IndicesOptions indicesOptions, final ActionListener listener) { final ClusterState state = clusterService.state(); - final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices); + final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices); final Map> results = ConcurrentCollections.newConcurrentMap(); int totalNumberOfShards = 0; int numberOfShards = 0; - for (String index : concreteIndices) { - final IndexMetaData indexMetaData = state.metaData().index(index); + for (Index index : concreteIndices) { + final IndexMetaData indexMetaData = state.metaData().getIndexSafe(index); totalNumberOfShards += indexMetaData.getTotalNumberOfShards(); numberOfShards += 
indexMetaData.getNumberOfShards(); - results.put(index, Collections.synchronizedList(new ArrayList<>())); + results.put(index.getName(), Collections.synchronizedList(new ArrayList<>())); } if (numberOfShards == 0) { @@ -129,8 +130,9 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL final int finalTotalNumberOfShards = totalNumberOfShards; final CountDown countDown = new CountDown(numberOfShards); - for (final String index : concreteIndices) { - final IndexMetaData indexMetaData = state.metaData().index(index); + for (final Index concreteIndex : concreteIndices) { + final String index = concreteIndex.getName(); + final IndexMetaData indexMetaData = state.metaData().getIndexSafe(concreteIndex); final int indexNumberOfShards = indexMetaData.getNumberOfShards(); for (int shard = 0; shard < indexNumberOfShards; shard++) { final ShardId shardId = new ShardId(indexMetaData.getIndex(), shard); @@ -240,7 +242,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL final IndexShardRoutingTable getShardRoutingTable(ShardId shardId, ClusterState state) { final IndexRoutingTable indexRoutingTable = state.routingTable().index(shardId.getIndexName()); if (indexRoutingTable == null) { - IndexMetaData index = state.getMetaData().index(shardId.getIndexName()); + IndexMetaData index = state.getMetaData().index(shardId.getIndex()); if (index != null && index.getState() == IndexMetaData.State.CLOSE) { throw new IndexClosedException(shardId.getIndex()); } @@ -309,7 +311,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } final Engine.CommitId expectedCommitId = expectedCommitIds.get(shard.currentNodeId()); if (expectedCommitId == null) { - logger.trace("{} can't resolve expected commit id for {}, skipping for sync id [{}]. shard routing {}", shardId, syncId, shard); + logger.trace("{} can't resolve expected commit id for current node, skipping for sync id [{}]. shard routing {}", shardId, syncId, shard); results.put(shard, new ShardSyncedFlushResponse("no commit id from pre-sync flush")); contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); continue; diff --git a/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java b/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java index a9e90884a68..b0b212d2ab4 100644 --- a/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java +++ b/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java @@ -19,12 +19,12 @@ package org.elasticsearch.indices.query; -import java.util.Map; - import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryParser; +import java.util.Map; + public class IndicesQueriesRegistry extends AbstractComponent { private Map> queryParsers; diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java index 24f87ee436f..8494939e46d 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java @@ -238,7 +238,7 @@ public class RecoveriesCollection { return; } lastSeenAccessTime = accessTime; - logger.trace("[monitor] rescheduling check for [{}]. 
last access time is [{}]", lastSeenAccessTime); + logger.trace("[monitor] rescheduling check for [{}]. last access time is [{}]", recoveryId, lastSeenAccessTime); threadPool.schedule(checkInterval, ThreadPool.Names.GENERIC, this); } } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java index 8d610dce05b..82595458479 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -32,31 +33,45 @@ import org.elasticsearch.common.unit.TimeValue; public class RecoverySettings extends AbstractComponent { - public static final Setting<ByteSizeValue> INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.recovery.max_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB), true, Setting.Scope.CLUSTER); + public static final Setting<ByteSizeValue> INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING = + Setting.byteSizeSetting("indices.recovery.max_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB), + Property.Dynamic, Property.NodeScope); /** * how long to wait before retrying after issues caused by cluster state syncing between nodes * i.e., local node is not yet known on remote node, remote shard not yet started etc. */ - public static final Setting<TimeValue> INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING = Setting.positiveTimeSetting("indices.recovery.retry_delay_state_sync", TimeValue.timeValueMillis(500), true, Setting.Scope.CLUSTER); + public static final Setting<TimeValue> INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING = + Setting.positiveTimeSetting("indices.recovery.retry_delay_state_sync", TimeValue.timeValueMillis(500), + Property.Dynamic, Property.NodeScope); /** how long to wait before retrying after network related issues */ - public static final Setting<TimeValue> INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING = Setting.positiveTimeSetting("indices.recovery.retry_delay_network", TimeValue.timeValueSeconds(5), true, Setting.Scope.CLUSTER); + public static final Setting<TimeValue> INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING = + Setting.positiveTimeSetting("indices.recovery.retry_delay_network", TimeValue.timeValueSeconds(5), + Property.Dynamic, Property.NodeScope); /** timeout value to use for requests made as part of the recovery process */ - public static final Setting<TimeValue> INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.recovery.internal_action_timeout", TimeValue.timeValueMinutes(15), true, Setting.Scope.CLUSTER); + public static final Setting<TimeValue> INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING = + Setting.positiveTimeSetting("indices.recovery.internal_action_timeout", TimeValue.timeValueMinutes(15), + Property.Dynamic, Property.NodeScope); /** * timeout value to use for requests made as part of the recovery process that are expected to take a long time. * defaults to twice `indices.recovery.internal_action_timeout`.
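* for example, with the default `indices.recovery.internal_action_timeout` of 15 minutes, this long timeout resolves to 30 minutes.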
*/ - public static final Setting<TimeValue> INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING = Setting.timeSetting("indices.recovery.internal_action_long_timeout", (s) -> TimeValue.timeValueMillis(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(s).millis() * 2).toString(), TimeValue.timeValueSeconds(0), true, Setting.Scope.CLUSTER); + public static final Setting<TimeValue> INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING = + Setting.timeSetting("indices.recovery.internal_action_long_timeout", + (s) -> TimeValue.timeValueMillis(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(s).millis() * 2).toString(), + TimeValue.timeValueSeconds(0), Property.Dynamic, Property.NodeScope); /** * recoveries that don't show any activity for more than this interval will be failed. * defaults to `indices.recovery.internal_action_long_timeout` */ - public static final Setting<TimeValue> INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING = Setting.timeSetting("indices.recovery.recovery_activity_timeout", (s) -> INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getRaw(s) , TimeValue.timeValueSeconds(0), true, Setting.Scope.CLUSTER); + public static final Setting<TimeValue> INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING = + Setting.timeSetting("indices.recovery.recovery_activity_timeout", + (s) -> INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getRaw(s) , TimeValue.timeValueSeconds(0), + Property.Dynamic, Property.NodeScope); public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512, ByteSizeUnit.KB); diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java index 9a5c23fc2e1..aaf351f6056 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySource.java @@ -20,9 +20,9 @@ package org.elasticsearch.indices.recovery; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -83,7 +83,7 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe } private RecoveryResponse recover(final StartRecoveryRequest request) throws IOException { - final IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex().getName()); + final IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); final IndexShard shard = indexService.getShard(request.shardId().id()); // starting recovery from that our (the source) shard state is marking the shard to be in recovery mode as well, otherwise diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index b92e2066af2..b609eb5d08a 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -137,7 +137,7 @@ public class RecoverySourceHandler { } } - logger.trace("snapshot translog for recovery. current size is [{}]", translogView.totalOperations()); + logger.trace("{} snapshot translog for recovery.
current size is [{}]", shard.shardId(), translogView.totalOperations()); try { phase2(translogView.snapshot()); } catch (Throwable e) { @@ -289,7 +289,7 @@ public class RecoverySourceHandler { RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but " + "checksums are ok", null); exception.addSuppressed(targetException); - logger.warn("{} Remote file corruption during finalization on node {}, recovering {}. local checksum OK", + logger.warn("{} Remote file corruption during finalization of recovery on node {}. local checksum OK", corruptIndexException, shard.shardId(), request.targetNode()); throw exception; } else { diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java index dcbb0c7bedf..f0a0b13b872 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryTargetService.java @@ -24,10 +24,10 @@ import org.apache.lucene.store.RateLimiter; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -90,8 +90,7 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve @Inject public RecoveryTargetService(Settings settings, ThreadPool threadPool, TransportService transportService, RecoverySettings - recoverySettings, - ClusterService clusterService) { + recoverySettings, ClusterService clusterService) { super(settings); this.threadPool = threadPool; this.transportService = transportService; @@ -218,7 +217,7 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve "operations") .append(", took [").append(timeValueMillis(recoveryResponse.phase2Time)).append("]") .append("\n"); - logger.trace(sb.toString()); + logger.trace("{}", sb); } else { logger.debug("{} recovery done from [{}], took [{}]", request.shardId(), recoveryTarget.sourceNode(), recoveryTime); } @@ -329,9 +328,13 @@ public class RecoveryTargetService extends AbstractComponent implements IndexEve throw exception; } // in very rare cases a translog replay from primary is processed before a mapping update on this node - // which causes local mapping changes. we want to wait until these mappings are processed. + // which causes local mapping changes since the mapping (clusterstate) might not have arrived on this node. + // we want to wait until these mappings are processed but also need to do some maintenance and roll back the + // number of processed (completed) operations in this batch to ensure accounting is correct. 
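// Editorial sketch of the accounting fixed below (names from this hunk, not lines from the patch):
// if this batch had already counted N = exception.completedOperations() operations as recovered
// before hitting the missing mapping, decrementing by N restores the pre-batch count, so the
// operations are counted exactly once when the batch is replayed after the mapping arrives.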
logger.trace("delaying recovery due to missing mapping changes (rolling back stats for [{}] ops)", exception, exception .completedOperations()); + final RecoveryState.Translog translog = recoveryTarget.state().getTranslog(); + translog.decrementRecoveredOperations(exception.completedOperations()); // do the maintainance and rollback competed ops // we do not need to use a timeout here since the entire recovery mechanism has an inactivity protection (it will be // canceled) observer.waitForNextChange(new ClusterStateObserver.Listener() { diff --git a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index d0aec817ee9..d2db41a7a0c 100644 --- a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -21,21 +21,23 @@ package org.elasticsearch.indices.store; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; @@ -57,7 +59,6 @@ import org.elasticsearch.transport.TransportService; import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.EnumSet; import java.util.List; import java.util.concurrent.TimeUnit; @@ -69,7 +70,9 @@ import java.util.concurrent.atomic.AtomicInteger; public class IndicesStore extends AbstractComponent implements ClusterStateListener, Closeable { // TODO this class can be foled into either IndicesService and partially into IndicesClusterStateService there is no need for a separate public service - public static final Setting INDICES_STORE_DELETE_SHARD_TIMEOUT = Setting.positiveTimeSetting("indices.store.delete.shard.timeout", new TimeValue(30, TimeUnit.SECONDS), false, Setting.Scope.CLUSTER); + public static final Setting INDICES_STORE_DELETE_SHARD_TIMEOUT = + Setting.positiveTimeSetting("indices.store.delete.shard.timeout", new TimeValue(30, TimeUnit.SECONDS), + Property.NodeScope); public static final String ACTION_SHARD_EXISTS = "internal:index/shard/exists"; private static final EnumSet ACTIVE_STATES = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED); private final IndicesService indicesService; @@ -113,7 +116,13 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe if 
(shardCanBeDeleted(event.state(), indexShardRoutingTable)) { ShardId shardId = indexShardRoutingTable.shardId(); IndexService indexService = indicesService.indexService(indexRoutingTable.getIndex()); - IndexSettings indexSettings = indexService != null ? indexService.getIndexSettings() : new IndexSettings(event.state().getMetaData().index(indexRoutingTable.getIndex()), settings); + final IndexSettings indexSettings; + if (indexService == null) { + IndexMetaData indexMetaData = event.state().getMetaData().getIndexSafe(indexRoutingTable.getIndex()); + indexSettings = new IndexSettings(indexMetaData, settings); + } else { + indexSettings = indexService.getIndexSettings(); + } if (indicesService.canDeleteShardContent(shardId, indexSettings)) { deleteShardIfExistElseWhere(event.state(), indexShardRoutingTable); } @@ -162,7 +171,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe private void deleteShardIfExistElseWhere(ClusterState state, IndexShardRoutingTable indexShardRoutingTable) { List> requests = new ArrayList<>(indexShardRoutingTable.size()); - String indexUUID = state.getMetaData().index(indexShardRoutingTable.shardId().getIndex()).getIndexUUID(); + String indexUUID = indexShardRoutingTable.shardId().getIndex().getUUID(); ClusterName clusterName = state.getClusterName(); for (ShardRouting shardRouting : indexShardRoutingTable) { // Node can't be null, because otherwise shardCanBeDeleted() would have returned false @@ -348,7 +357,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe return null; } ShardId shardId = request.shardId; - IndexService indexService = indicesService.indexService(shardId.getIndexName()); + IndexService indexService = indicesService.indexService(shardId.getIndex()); if (indexService != null && indexService.indexUUID().equals(request.indexUUID)) { return indexService.getShardOrNull(shardId.id()); } diff --git a/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java b/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java index bcc2d7f74c4..35a34ebea1b 100644 --- a/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java +++ b/core/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java @@ -29,11 +29,11 @@ import org.elasticsearch.action.support.nodes.BaseNodesRequest; import org.elasticsearch.action.support.nodes.BaseNodesResponse; import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -126,7 +126,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction { - public static final Setting INDICES_TTL_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.ttl.interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.CLUSTER); + public static final Setting INDICES_TTL_INTERVAL_SETTING = + Setting.positiveTimeSetting("indices.ttl.interval", TimeValue.timeValueSeconds(60), + 
Property.Dynamic, Property.NodeScope); private final ClusterService clusterService; private final IndicesService indicesService; @@ -159,7 +162,7 @@ public class IndicesTTLService extends AbstractLifecycleComponent diff --git a/core/src/main/java/org/elasticsearch/ingest/IngestStats.java b/core/src/main/java/org/elasticsearch/ingest/IngestStats.java new file mode 100644 +public class IngestStats implements Writeable, ToXContent { + private final Stats totalStats; + private final Map statsPerPipeline; + + public IngestStats(StreamInput in) throws IOException { + this.totalStats = new Stats(in); + int size = in.readVInt(); + this.statsPerPipeline = new HashMap<>(size); + for (int i = 0; i < size; i++) { + statsPerPipeline.put(in.readString(), new Stats(in)); + } + } + + public IngestStats(Stats totalStats, Map statsPerPipeline) { + this.totalStats = totalStats; + this.statsPerPipeline = statsPerPipeline; + } + + /** + * @return The accumulated stats for all pipelines. + */ + public Stats getTotalStats() { + return totalStats; + } + + /** + * @return The stats on a per-pipeline basis. + */ + public Map getStatsPerPipeline() { + return statsPerPipeline; + } + + @Override + public IngestStats readFrom(StreamInput in) throws IOException { + return new IngestStats(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + totalStats.writeTo(out); + out.writeVInt(statsPerPipeline.size()); // must match the readVInt in the StreamInput constructor + for (Map.Entry entry : statsPerPipeline.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("ingest"); + builder.startObject("total"); + totalStats.toXContent(builder, params); + builder.endObject(); + builder.startObject("pipelines"); + for (Map.Entry entry : statsPerPipeline.entrySet()) { + builder.startObject(entry.getKey()); + entry.getValue().toXContent(builder, params); + builder.endObject(); + } + builder.endObject(); + builder.endObject(); + return builder; + } + + public static class Stats implements Writeable, ToXContent { + + private final long ingestCount; + private final long ingestTimeInMillis; + private final long ingestCurrent; + private final long ingestFailedCount; + + public Stats(StreamInput in) throws IOException { + ingestCount = in.readVLong(); + ingestTimeInMillis = in.readVLong(); + ingestCurrent = in.readVLong(); + ingestFailedCount = in.readVLong(); + } + + public Stats(long ingestCount, long ingestTimeInMillis, long ingestCurrent, long ingestFailedCount) { + this.ingestCount = ingestCount; + this.ingestTimeInMillis = ingestTimeInMillis; + this.ingestCurrent = ingestCurrent; + this.ingestFailedCount = ingestFailedCount; + } + + /** + * @return The total number of executed ingest preprocessing operations. + */ + public long getIngestCount() { + return ingestCount; + } + + /** + * @return The total time spent on ingest preprocessing, in milliseconds. + */ + public long getIngestTimeInMillis() { + return ingestTimeInMillis; + } + + /** + * @return The total number of ingest preprocessing operations currently executing. + */ + public long getIngestCurrent() { + return ingestCurrent; + } + + /** + * @return The total number of ingest preprocessing operations that have failed.
+ */ + public long getIngestFailedCount() { + return ingestFailedCount; + } + + @Override + public Stats readFrom(StreamInput in) throws IOException { + return new Stats(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(ingestCount); + out.writeVLong(ingestTimeInMillis); + out.writeVLong(ingestCurrent); + out.writeVLong(ingestFailedCount); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("count", ingestCount); + builder.timeValueField("time_in_millis", "time", ingestTimeInMillis, TimeUnit.MILLISECONDS); + builder.field("current", ingestCurrent); + builder.field("failed", ingestFailedCount); + return builder; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java index 3f0de550782..94c79db30a0 100644 --- a/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java +++ b/core/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java @@ -19,23 +19,36 @@ package org.elasticsearch.ingest; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.metrics.CounterMetric; +import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.ingest.core.IngestDocument; import org.elasticsearch.ingest.core.Pipeline; import org.elasticsearch.threadpool.ThreadPool; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; +import java.util.Optional; +import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import java.util.function.Consumer; -public class PipelineExecutionService { +public class PipelineExecutionService implements ClusterStateListener { private final PipelineStore store; private final ThreadPool threadPool; + private final StatsHolder totalStats = new StatsHolder(); + private volatile Map statsHolderPerPipeline = Collections.emptyMap(); + public PipelineExecutionService(PipelineStore store, ThreadPool threadPool) { this.store = store; this.threadPool = threadPool; @@ -89,29 +102,85 @@ public class PipelineExecutionService { }); } - private void innerExecute(IndexRequest indexRequest, Pipeline pipeline) throws Exception { - String index = indexRequest.index(); - String type = indexRequest.type(); - String id = indexRequest.id(); - String routing = indexRequest.routing(); - String parent = indexRequest.parent(); - String timestamp = indexRequest.timestamp(); - String ttl = indexRequest.ttl() == null ? null : indexRequest.ttl().toString(); - Map sourceAsMap = indexRequest.sourceAsMap(); - IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, parent, timestamp, ttl, sourceAsMap); - pipeline.execute(ingestDocument); + public IngestStats stats() { + Map statsHolderPerPipeline = this.statsHolderPerPipeline; - Map metadataMap = ingestDocument.extractMetadata(); - //it's fine to set all metadata fields all the time, as ingest document holds their starting values - //before ingestion, which might also get modified during ingestion. 
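// A hedged, test-style round trip of the IngestStats wire format defined above. It assumes
// the stream utilities commonly used in this codebase's serialization tests
// (org.elasticsearch.common.io.stream.BytesStreamOutput and StreamInput.wrap); the stats
// values and the pipeline id "my-pipeline" are made up for illustration.
static IngestStats roundTripIngestStats() throws IOException {
    IngestStats.Stats stats = new IngestStats.Stats(5, 12, 0, 1); // count, millis, current, failed
    IngestStats written = new IngestStats(stats, Collections.singletonMap("my-pipeline", stats));
    BytesStreamOutput out = new BytesStreamOutput();
    written.writeTo(out);
    IngestStats read = new IngestStats(StreamInput.wrap(out.bytes()));
    assert read.getTotalStats().getIngestCount() == 5;
    assert read.getStatsPerPipeline().containsKey("my-pipeline");
    return read;
}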
- indexRequest.index(metadataMap.get(IngestDocument.MetaData.INDEX)); - indexRequest.type(metadataMap.get(IngestDocument.MetaData.TYPE)); - indexRequest.id(metadataMap.get(IngestDocument.MetaData.ID)); - indexRequest.routing(metadataMap.get(IngestDocument.MetaData.ROUTING)); - indexRequest.parent(metadataMap.get(IngestDocument.MetaData.PARENT)); - indexRequest.timestamp(metadataMap.get(IngestDocument.MetaData.TIMESTAMP)); - indexRequest.ttl(metadataMap.get(IngestDocument.MetaData.TTL)); - indexRequest.source(ingestDocument.getSourceAndMetadata()); + Map statsPerPipeline = new HashMap<>(statsHolderPerPipeline.size()); + for (Map.Entry entry : statsHolderPerPipeline.entrySet()) { + statsPerPipeline.put(entry.getKey(), entry.getValue().createStats()); + } + + return new IngestStats(totalStats.createStats(), statsPerPipeline); + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + IngestMetadata ingestMetadata = event.state().getMetaData().custom(IngestMetadata.TYPE); + if (ingestMetadata != null) { + updatePipelineStats(ingestMetadata); + } + } + + void updatePipelineStats(IngestMetadata ingestMetadata) { + Map newStatsPerPipeline = new HashMap<>(statsHolderPerPipeline); + // remove via removeIf instead of calling remove() inside a for-each over the key set, + // which would trip the map's fail-fast iterator with a ConcurrentModificationException + boolean changed = newStatsPerPipeline.keySet() + .removeIf(pipeline -> ingestMetadata.getPipelines().containsKey(pipeline) == false); + for (String pipeline : ingestMetadata.getPipelines().keySet()) { + if (newStatsPerPipeline.containsKey(pipeline) == false) { + newStatsPerPipeline.put(pipeline, new StatsHolder()); + changed = true; + } + } + + if (changed) { + statsHolderPerPipeline = Collections.unmodifiableMap(newStatsPerPipeline); + } + } + + private void innerExecute(IndexRequest indexRequest, Pipeline pipeline) throws Exception { + long startTimeInNanos = System.nanoTime(); + // the pipeline-specific stats holder may not exist and that is fine + // (e.g. the pipeline may have been removed while we're ingesting a document) + Optional pipelineStats = Optional.ofNullable(statsHolderPerPipeline.get(pipeline.getId())); + try { + totalStats.preIngest(); + pipelineStats.ifPresent(StatsHolder::preIngest); + String index = indexRequest.index(); + String type = indexRequest.type(); + String id = indexRequest.id(); + String routing = indexRequest.routing(); + String parent = indexRequest.parent(); + String timestamp = indexRequest.timestamp(); + String ttl = indexRequest.ttl() == null ? null : indexRequest.ttl().toString(); + Map sourceAsMap = indexRequest.sourceAsMap(); + IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, parent, timestamp, ttl, sourceAsMap); + pipeline.execute(ingestDocument); + + Map metadataMap = ingestDocument.extractMetadata(); + //it's fine to set all metadata fields all the time, as ingest document holds their starting values + //before ingestion, which might also get modified during ingestion.
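// A hedged behavioural sketch of updatePipelineStats above: a holder appears when a
// pipeline shows up in the cluster state and disappears when the pipeline is deleted, so
// stats() never reports pipelines that no longer exist. ingestMetadataWithPipelines(...)
// is a hypothetical test helper that builds an IngestMetadata holding the given ids.
void updatePipelineStatsExample(PipelineExecutionService service) {
    service.updatePipelineStats(ingestMetadataWithPipelines("p1", "p2")); // hypothetical helper
    assert service.stats().getStatsPerPipeline().keySet().equals(new HashSet<>(Arrays.asList("p1", "p2")));
    service.updatePipelineStats(ingestMetadataWithPipelines("p2")); // "p1" was deleted
    assert service.stats().getStatsPerPipeline().keySet().equals(Collections.singleton("p2"));
}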
+ indexRequest.index(metadataMap.get(IngestDocument.MetaData.INDEX)); + indexRequest.type(metadataMap.get(IngestDocument.MetaData.TYPE)); + indexRequest.id(metadataMap.get(IngestDocument.MetaData.ID)); + indexRequest.routing(metadataMap.get(IngestDocument.MetaData.ROUTING)); + indexRequest.parent(metadataMap.get(IngestDocument.MetaData.PARENT)); + indexRequest.timestamp(metadataMap.get(IngestDocument.MetaData.TIMESTAMP)); + indexRequest.ttl(metadataMap.get(IngestDocument.MetaData.TTL)); + indexRequest.source(ingestDocument.getSourceAndMetadata()); + } catch (Exception e) { + totalStats.ingestFailed(); + pipelineStats.ifPresent(StatsHolder::ingestFailed); + throw e; + } finally { + long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeInNanos); + totalStats.postIngest(ingestTimeInMillis); + pipelineStats.ifPresent(statsHolder -> statsHolder.postIngest(ingestTimeInMillis)); + } } private Pipeline getPipeline(String pipelineId) { @@ -121,4 +190,30 @@ public class PipelineExecutionService { } return pipeline; } + + static class StatsHolder { + + private final MeanMetric ingestMetric = new MeanMetric(); + private final CounterMetric ingestCurrent = new CounterMetric(); + private final CounterMetric ingestFailed = new CounterMetric(); + + void preIngest() { + ingestCurrent.inc(); + } + + void postIngest(long ingestTimeInMillis) { + ingestCurrent.dec(); + ingestMetric.inc(ingestTimeInMillis); + } + + void ingestFailed() { + ingestFailed.inc(); + } + + IngestStats.Stats createStats() { + return new IngestStats.Stats(ingestMetric.count(), ingestMetric.sum(), ingestCurrent.count(), ingestFailed.count()); + } + + } + } diff --git a/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java b/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java index ac2df419f55..7e0dc1b4ffa 100644 --- a/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java +++ b/core/src/main/java/org/elasticsearch/ingest/PipelineStore.java @@ -28,11 +28,11 @@ import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.ingest.WritePipelineResponse; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; @@ -40,7 +40,6 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.ingest.core.IngestInfo; import org.elasticsearch.ingest.core.Pipeline; import org.elasticsearch.ingest.core.Processor; -import org.elasticsearch.ingest.core.ProcessorInfo; import org.elasticsearch.ingest.core.TemplateService; import org.elasticsearch.script.ScriptService; @@ -51,7 +50,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Objects; public class PipelineStore extends AbstractComponent implements Closeable, ClusterStateListener { diff --git a/core/src/main/java/org/elasticsearch/ingest/core/CompoundProcessor.java b/core/src/main/java/org/elasticsearch/ingest/core/CompoundProcessor.java index ddf3781d1a6..16b3aa10a22 100644 --- 
a/core/src/main/java/org/elasticsearch/ingest/core/CompoundProcessor.java +++ b/core/src/main/java/org/elasticsearch/ingest/core/CompoundProcessor.java @@ -28,15 +28,16 @@ import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.stream.Collectors; /** * A Processor that executes a list of other "processors". It executes a separate list of * "onFailureProcessors" when any of the processors throw an {@link Exception}. */ public class CompoundProcessor implements Processor { - static final String ON_FAILURE_MESSAGE_FIELD = "on_failure_message"; - static final String ON_FAILURE_PROCESSOR_TYPE_FIELD = "on_failure_processor_type"; - static final String ON_FAILURE_PROCESSOR_TAG_FIELD = "on_failure_processor_tag"; + public static final String ON_FAILURE_MESSAGE_FIELD = "on_failure_message"; + public static final String ON_FAILURE_PROCESSOR_TYPE_FIELD = "on_failure_processor_type"; + public static final String ON_FAILURE_PROCESSOR_TAG_FIELD = "on_failure_processor_tag"; private final List processors; private final List onFailureProcessors; @@ -84,7 +85,7 @@ public class CompoundProcessor implements Processor { @Override public String getTag() { - return "compound-processor-" + Objects.hash(processors, onFailureProcessors); + return "CompoundProcessor-" + flattenProcessors().stream().map(Processor::getTag).collect(Collectors.joining("-")); } @Override @@ -104,18 +105,27 @@ public class CompoundProcessor implements Processor { } void executeOnFailure(IngestDocument ingestDocument, Exception cause, String failedProcessorType, String failedProcessorTag) throws Exception { - Map ingestMetadata = ingestDocument.getIngestMetadata(); try { - ingestMetadata.put(ON_FAILURE_MESSAGE_FIELD, cause.getMessage()); - ingestMetadata.put(ON_FAILURE_PROCESSOR_TYPE_FIELD, failedProcessorType); - ingestMetadata.put(ON_FAILURE_PROCESSOR_TAG_FIELD, failedProcessorTag); + putFailureMetadata(ingestDocument, cause, failedProcessorType, failedProcessorTag); for (Processor processor : onFailureProcessors) { processor.execute(ingestDocument); } } finally { - ingestMetadata.remove(ON_FAILURE_MESSAGE_FIELD); - ingestMetadata.remove(ON_FAILURE_PROCESSOR_TYPE_FIELD); - ingestMetadata.remove(ON_FAILURE_PROCESSOR_TAG_FIELD); + removeFailureMetadata(ingestDocument); } } + + private void putFailureMetadata(IngestDocument ingestDocument, Exception cause, String failedProcessorType, String failedProcessorTag) { + Map ingestMetadata = ingestDocument.getIngestMetadata(); + ingestMetadata.put(ON_FAILURE_MESSAGE_FIELD, cause.getMessage()); + ingestMetadata.put(ON_FAILURE_PROCESSOR_TYPE_FIELD, failedProcessorType); + ingestMetadata.put(ON_FAILURE_PROCESSOR_TAG_FIELD, failedProcessorTag); + } + + private void removeFailureMetadata(IngestDocument ingestDocument) { + Map ingestMetadata = ingestDocument.getIngestMetadata(); + ingestMetadata.remove(ON_FAILURE_MESSAGE_FIELD); + ingestMetadata.remove(ON_FAILURE_PROCESSOR_TYPE_FIELD); + ingestMetadata.remove(ON_FAILURE_PROCESSOR_TAG_FIELD); + } } diff --git a/core/src/main/java/org/elasticsearch/ingest/core/IngestInfo.java b/core/src/main/java/org/elasticsearch/ingest/core/IngestInfo.java index 8625e1d8884..d128732203f 100644 --- a/core/src/main/java/org/elasticsearch/ingest/core/IngestInfo.java +++ b/core/src/main/java/org/elasticsearch/ingest/core/IngestInfo.java @@ -22,6 +22,7 @@ package org.elasticsearch.ingest.core; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import 
org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -32,17 +33,22 @@ import java.util.LinkedHashSet; import java.util.List; import java.util.Objects; import java.util.Set; +import java.util.TreeSet; -public class IngestInfo implements Streamable, ToXContent { +public class IngestInfo implements Writeable, ToXContent { - private Set processors; + private final Set processors; - public IngestInfo() { - processors = Collections.emptySet(); + public IngestInfo(StreamInput in) throws IOException { + this(Collections.emptyList()); + final int size = in.readVInt(); + for (int i = 0; i < size; i++) { + processors.add(new ProcessorInfo(in)); + } } public IngestInfo(List processors) { - this.processors = new LinkedHashSet<>(processors); + this.processors = new TreeSet<>(processors); // we use a TreeSet here to get a testable, predictable order } public Iterable getProcessors() { @@ -54,15 +60,8 @@ public class IngestInfo implements Streamable, ToXContent { } @Override - public void readFrom(StreamInput in) throws IOException { - int size = in.readVInt(); - Set processors = new LinkedHashSet<>(size); - for (int i = 0; i < size; i++) { - ProcessorInfo info = new ProcessorInfo(); - info.readFrom(in); - processors.add(info); - } - this.processors = processors; + public IngestInfo readFrom(StreamInput in) throws IOException { + return new IngestInfo(in); + } @Override diff --git a/core/src/main/java/org/elasticsearch/ingest/core/Pipeline.java b/core/src/main/java/org/elasticsearch/ingest/core/Pipeline.java index 821a44c0a96..aaae929e0a9 100644 --- a/core/src/main/java/org/elasticsearch/ingest/core/Pipeline.java +++ b/core/src/main/java/org/elasticsearch/ingest/core/Pipeline.java @@ -68,6 +68,13 @@ public final class Pipeline { return description; } + /** + * Get the underlying {@link CompoundProcessor} containing the Pipeline's processors. + */ + public CompoundProcessor getCompoundProcessor() { + return compoundProcessor; + } + /** * Unmodifiable list containing each processor that operates on the data.
*/ diff --git a/core/src/main/java/org/elasticsearch/ingest/core/ProcessorInfo.java b/core/src/main/java/org/elasticsearch/ingest/core/ProcessorInfo.java index 64c3d19719b..f652b182919 100644 --- a/core/src/main/java/org/elasticsearch/ingest/core/ProcessorInfo.java +++ b/core/src/main/java/org/elasticsearch/ingest/core/ProcessorInfo.java @@ -22,16 +22,18 @@ package org.elasticsearch.ingest.core; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; -public class ProcessorInfo implements Streamable, ToXContent { +public class ProcessorInfo implements Writeable, ToXContent, Comparable { - private String type; + private final String type; - ProcessorInfo() { + public ProcessorInfo(StreamInput input) throws IOException { + type = input.readString(); } public ProcessorInfo(String type) { @@ -46,8 +48,8 @@ public class ProcessorInfo implements Streamable, ToXContent { } @Override - public void readFrom(StreamInput in) throws IOException { - this.type = in.readString(); + public ProcessorInfo readFrom(StreamInput in) throws IOException { + return new ProcessorInfo(in); } @Override @@ -78,4 +80,9 @@ public class ProcessorInfo implements Streamable, ToXContent { public int hashCode() { return type.hashCode(); } + + @Override + public int compareTo(ProcessorInfo o) { + return type.compareTo(o.type); + } } diff --git a/core/src/main/java/org/elasticsearch/ingest/processor/ForEachProcessor.java b/core/src/main/java/org/elasticsearch/ingest/processor/ForEachProcessor.java index 5b101fbfb32..b6f6a85d219 100644 --- a/core/src/main/java/org/elasticsearch/ingest/processor/ForEachProcessor.java +++ b/core/src/main/java/org/elasticsearch/ingest/processor/ForEachProcessor.java @@ -59,11 +59,8 @@ public final class ForEachProcessor extends AbstractProcessor { List values = ingestDocument.getFieldValue(field, List.class); List newValues = new ArrayList<>(values.size()); for (Object value : values) { - Map innerSource = new HashMap<>(); - innerSource.put("_value", value); - for (IngestDocument.MetaData metaData : IngestDocument.MetaData.values()) { - innerSource.put(metaData.getFieldName(), ingestDocument.getSourceAndMetadata().get(metaData.getFieldName())); - } + Map innerSource = new HashMap<>(ingestDocument.getSourceAndMetadata()); + innerSource.put("_value", value); // scalar value to access the list item being evaluated IngestDocument innerIngestDocument = new IngestDocument(innerSource, ingestDocument.getIngestMetadata()); for (Processor processor : processors) { processor.execute(innerIngestDocument); diff --git a/core/src/main/java/org/elasticsearch/ingest/processor/TrackingResultProcessor.java b/core/src/main/java/org/elasticsearch/ingest/processor/TrackingResultProcessor.java new file mode 100644 index 00000000000..af820318d83 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/ingest/processor/TrackingResultProcessor.java @@ -0,0 +1,89 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest.processor; + +import org.elasticsearch.action.ingest.SimulateProcessorResult; +import org.elasticsearch.ingest.core.CompoundProcessor; +import org.elasticsearch.ingest.core.IngestDocument; +import org.elasticsearch.ingest.core.Processor; + +import java.util.ArrayList; +import java.util.List; + +/** + * Processor used within the Simulate API to keep track of the processors executed in a pipeline. + */ +public final class TrackingResultProcessor implements Processor { + + private final Processor actualProcessor; + private final List processorResultList; + + public TrackingResultProcessor(Processor actualProcessor, List processorResultList) { + this.processorResultList = processorResultList; + if (actualProcessor instanceof CompoundProcessor) { + CompoundProcessor trackedCompoundProcessor = decorate((CompoundProcessor) actualProcessor, processorResultList); + this.actualProcessor = trackedCompoundProcessor; + } else { + this.actualProcessor = actualProcessor; + } + } + + @Override + public void execute(IngestDocument ingestDocument) throws Exception { + try { + actualProcessor.execute(ingestDocument); + processorResultList.add(new SimulateProcessorResult(actualProcessor.getTag(), new IngestDocument(ingestDocument))); + } catch (Exception e) { + processorResultList.add(new SimulateProcessorResult(actualProcessor.getTag(), e)); + throw e; + } + } + + @Override + public String getType() { + return actualProcessor.getType(); + } + + @Override + public String getTag() { + return actualProcessor.getTag(); + } + + public static CompoundProcessor decorate(CompoundProcessor compoundProcessor, List processorResultList) { + List processors = new ArrayList<>(compoundProcessor.getProcessors().size()); + for (Processor processor : compoundProcessor.getProcessors()) { + if (processor instanceof CompoundProcessor) { + processors.add(decorate((CompoundProcessor) processor, processorResultList)); + } else { + processors.add(new TrackingResultProcessor(processor, processorResultList)); + } + } + List onFailureProcessors = new ArrayList<>(compoundProcessor.getOnFailureProcessors().size()); + for (Processor processor : compoundProcessor.getOnFailureProcessors()) { + if (processor instanceof CompoundProcessor) { + onFailureProcessors.add(decorate((CompoundProcessor) processor, processorResultList)); + } else { + onFailureProcessors.add(new TrackingResultProcessor(processor, processorResultList)); + } + } + return new CompoundProcessor(processors, onFailureProcessors); + } +} + diff --git a/core/src/main/java/org/elasticsearch/monitor/fs/FsService.java b/core/src/main/java/org/elasticsearch/monitor/fs/FsService.java index 99a78f13a07..0287d5c522c 100644 --- a/core/src/main/java/org/elasticsearch/monitor/fs/FsService.java +++ b/core/src/main/java/org/elasticsearch/monitor/fs/FsService.java @@ -21,6 +21,7 @@ package org.elasticsearch.monitor.fs; import
org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.SingleObjectCache; @@ -37,7 +38,8 @@ public class FsService extends AbstractComponent { private final SingleObjectCache fsStatsCache; public final static Setting REFRESH_INTERVAL_SETTING = - Setting.timeSetting("monitor.fs.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), false, Setting.Scope.CLUSTER); + Setting.timeSetting("monitor.fs.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), + Property.NodeScope); public FsService(Settings settings, NodeEnvironment nodeEnvironment) throws IOException { super(settings); diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java index 97c813a0fe3..5a2d591c7dc 100644 --- a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java +++ b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java @@ -21,7 +21,7 @@ package org.elasticsearch.monitor.jvm; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Scope; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.FutureUtils; @@ -47,12 +47,14 @@ public class JvmGcMonitorService extends AbstractLifecycleComponent ENABLED_SETTING = Setting.boolSetting("monitor.jvm.gc.enabled", true, false, Scope.CLUSTER); + public final static Setting ENABLED_SETTING = + Setting.boolSetting("monitor.jvm.gc.enabled", true, Property.NodeScope); public final static Setting REFRESH_INTERVAL_SETTING = - Setting.timeSetting("monitor.jvm.gc.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), false, Scope.CLUSTER); + Setting.timeSetting("monitor.jvm.gc.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), + Property.NodeScope); private static String GC_COLLECTOR_PREFIX = "monitor.jvm.gc.collector."; - public final static Setting GC_SETTING = Setting.groupSetting(GC_COLLECTOR_PREFIX, false, Scope.CLUSTER); + public final static Setting GC_SETTING = Setting.groupSetting(GC_COLLECTOR_PREFIX, Property.NodeScope); static class GcThreshold { public final String name; diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java index fbec6cda168..e91c05e75ac 100644 --- a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java +++ b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java @@ -21,6 +21,7 @@ package org.elasticsearch.monitor.jvm; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -36,7 +37,8 @@ public class JvmService extends AbstractComponent { private JvmStats jvmStats; public final static Setting REFRESH_INTERVAL_SETTING = - Setting.timeSetting("monitor.jvm.refresh_interval", 
TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), false, Setting.Scope.CLUSTER); + Setting.timeSetting("monitor.jvm.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), + Property.NodeScope); public JvmService(Settings settings) { super(settings); diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsService.java b/core/src/main/java/org/elasticsearch/monitor/os/OsService.java index 5f836c6f928..d452094d7b0 100644 --- a/core/src/main/java/org/elasticsearch/monitor/os/OsService.java +++ b/core/src/main/java/org/elasticsearch/monitor/os/OsService.java @@ -21,6 +21,7 @@ package org.elasticsearch.monitor.os; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.SingleObjectCache; @@ -38,7 +39,8 @@ public class OsService extends AbstractComponent { private SingleObjectCache osStatsCache; public final static Setting REFRESH_INTERVAL_SETTING = - Setting.timeSetting("monitor.os.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), false, Setting.Scope.CLUSTER); + Setting.timeSetting("monitor.os.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), + Property.NodeScope); public OsService(Settings settings) { super(settings); diff --git a/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java b/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java index 9e3283af4fc..30c24f34c66 100644 --- a/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java +++ b/core/src/main/java/org/elasticsearch/monitor/process/ProcessService.java @@ -21,6 +21,7 @@ package org.elasticsearch.monitor.process; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.SingleObjectCache; @@ -35,7 +36,8 @@ public final class ProcessService extends AbstractComponent { private final SingleObjectCache processStatsCache; public final static Setting REFRESH_INTERVAL_SETTING = - Setting.timeSetting("monitor.process.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), false, Setting.Scope.CLUSTER); + Setting.timeSetting("monitor.process.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), + Property.NodeScope); public ProcessService(Settings settings) { super(settings); diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index e279d3e819f..6e270ffc3ff 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -30,14 +30,15 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.node.NodeClientModule; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterNameModule; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.MasterNodeChangePredicate; +import org.elasticsearch.cluster.NodeConnectionsService; import 
org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.cluster.routing.RoutingService; -import org.elasticsearch.cluster.service.InternalClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.StopWatch; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleComponent; @@ -53,6 +54,7 @@ import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.transport.BoundTransportAddress; @@ -81,8 +83,6 @@ import org.elasticsearch.monitor.MonitorService; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.node.internal.InternalSettingsPreparer; import org.elasticsearch.node.service.NodeService; -import org.elasticsearch.percolator.PercolatorModule; -import org.elasticsearch.percolator.PercolatorService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsModule; import org.elasticsearch.plugins.PluginsService; @@ -129,17 +129,23 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder; */ public class Node implements Closeable { - public static final Setting WRITE_PORTS_FIELD_SETTING = Setting.boolSetting("node.portsfile", false, false, Setting.Scope.CLUSTER); - public static final Setting NODE_CLIENT_SETTING = Setting.boolSetting("node.client", false, false, Setting.Scope.CLUSTER); - public static final Setting NODE_DATA_SETTING = Setting.boolSetting("node.data", true, false, Setting.Scope.CLUSTER); - public static final Setting NODE_MASTER_SETTING = Setting.boolSetting("node.master", true, false, Setting.Scope.CLUSTER); - public static final Setting NODE_LOCAL_SETTING = Setting.boolSetting("node.local", false, false, Setting.Scope.CLUSTER); - public static final Setting NODE_MODE_SETTING = new Setting<>("node.mode", "network", Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting NODE_INGEST_SETTING = Setting.boolSetting("node.ingest", true, false, Setting.Scope.CLUSTER); - public static final Setting NODE_NAME_SETTING = Setting.simpleString("node.name", false, Setting.Scope.CLUSTER); + public static final Setting WRITE_PORTS_FIELD_SETTING = + Setting.boolSetting("node.portsfile", false, Property.NodeScope); + public static final Setting NODE_CLIENT_SETTING = + Setting.boolSetting("node.client", false, Property.NodeScope); + public static final Setting NODE_DATA_SETTING = Setting.boolSetting("node.data", true, Property.NodeScope); + public static final Setting NODE_MASTER_SETTING = + Setting.boolSetting("node.master", true, Property.NodeScope); + public static final Setting NODE_LOCAL_SETTING = + Setting.boolSetting("node.local", false, Property.NodeScope); + public static final Setting NODE_MODE_SETTING = + new Setting<>("node.mode", "network", Function.identity(), Property.NodeScope); + public static final Setting NODE_INGEST_SETTING = + Setting.boolSetting("node.ingest", true, Property.NodeScope); + public static final Setting NODE_NAME_SETTING = Setting.simpleString("node.name", Property.NodeScope); // this 
sucks that folks can mistype client etc and get away with it. // TODO: we should move this to node.attribute.${name} = ${value} instead. - public static final Setting NODE_ATTRIBUTES = Setting.groupSetting("node.", false, Setting.Scope.CLUSTER); + public static final Setting NODE_ATTRIBUTES = Setting.groupSetting("node.", Property.NodeScope); private static final String CLIENT_TYPE = "node"; @@ -217,7 +223,6 @@ public class Node implements Closeable { modules.add(new ActionModule(DiscoveryNode.ingestNode(settings), false)); modules.add(new GatewayModule(settings)); modules.add(new NodeClientModule()); - modules.add(new PercolatorModule()); modules.add(new ResourceWatcherModule()); modules.add(new RepositoriesModule()); modules.add(new TribeModule()); @@ -290,9 +295,11 @@ public class Node implements Closeable { injector.getInstance(MonitorService.class).start(); injector.getInstance(RestController.class).start(); - assert injector.getInstance(ClusterService.class) instanceof InternalClusterService : - "node cluster service implementation must inherit from InternalClusterService"; - final InternalClusterService clusterService = (InternalClusterService) injector.getInstance(ClusterService.class); + final ClusterService clusterService = injector.getInstance(ClusterService.class); + + final NodeConnectionsService nodeConnectionsService = injector.getInstance(NodeConnectionsService.class); + nodeConnectionsService.start(); + clusterService.setNodeConnectionsService(nodeConnectionsService); // TODO hack around circular dependencies problems injector.getInstance(GatewayAllocator.class).setReallocation(clusterService, injector.getInstance(RoutingService.class)); @@ -311,6 +318,15 @@ public class Node implements Closeable { // Start the transport service now so the publish address will be added to the local disco node in ClusterService TransportService transportService = injector.getInstance(TransportService.class); transportService.start(); + DiscoveryNode localNode = injector.getInstance(DiscoveryNodeService.class) + .buildLocalNode(transportService.boundAddress().publishAddress()); + + // TODO: need to find a cleaner way to start/construct a service with some initial parameters, + // playing nice with the life cycle interfaces + clusterService.setLocalNode(localNode); + transportService.setLocalNode(localNode); + clusterService.add(transportService.getTaskManager()); + clusterService.start(); // start after cluster service so the local disco is known @@ -392,6 +408,7 @@ public class Node implements Closeable { injector.getInstance(RoutingService.class).stop(); injector.getInstance(ClusterService.class).stop(); injector.getInstance(Discovery.class).stop(); + injector.getInstance(NodeConnectionsService.class).stop(); injector.getInstance(MonitorService.class).stop(); injector.getInstance(GatewayService.class).stop(); injector.getInstance(SearchService.class).stop(); @@ -449,6 +466,8 @@ public class Node implements Closeable { toClose.add(injector.getInstance(RoutingService.class)); toClose.add(() -> stopWatch.stop().start("cluster")); toClose.add(injector.getInstance(ClusterService.class)); + toClose.add(() -> stopWatch.stop().start("node_connections_service")); + toClose.add(injector.getInstance(NodeConnectionsService.class)); toClose.add(() -> stopWatch.stop().start("discovery")); toClose.add(injector.getInstance(Discovery.class)); toClose.add(() -> stopWatch.stop().start("monitor")); @@ -461,8 +480,6 @@ public class Node implements Closeable { 
toClose.add(injector.getInstance(RestController.class)); toClose.add(() -> stopWatch.stop().start("transport")); toClose.add(injector.getInstance(TransportService.class)); - toClose.add(() -> stopWatch.stop().start("percolator_service")); - toClose.add(injector.getInstance(PercolatorService.class)); for (Class plugin : pluginsService.nodeServices()) { toClose.add(() -> stopWatch.stop().start("plugin(" + plugin.getName() + ")")); diff --git a/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java b/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java index faf449586c1..8864a70ccdc 100644 --- a/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java +++ b/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java @@ -23,9 +23,10 @@ import org.elasticsearch.bootstrap.BootstrapInfo; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.cli.Terminal; +import org.elasticsearch.cli.Terminal; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.env.Environment; @@ -52,12 +53,13 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder; public class InternalSettingsPreparer { private static final String[] ALLOWED_SUFFIXES = {".yml", ".yaml", ".json", ".properties"}; - static final String[] PROPERTY_PREFIXES = {"es.", "elasticsearch."}; - static final String[] PROPERTY_DEFAULTS_PREFIXES = {"es.default.", "elasticsearch.default."}; + static final String PROPERTY_PREFIX = "es."; + static final String PROPERTY_DEFAULTS_PREFIX = "es.default."; public static final String SECRET_PROMPT_VALUE = "${prompt.secret}"; public static final String TEXT_PROMPT_VALUE = "${prompt.text}"; - public static final Setting IGNORE_SYSTEM_PROPERTIES_SETTING = Setting.boolSetting("config.ignore_system_properties", false, false, Setting.Scope.CLUSTER); + public static final Setting IGNORE_SYSTEM_PROPERTIES_SETTING = + Setting.boolSetting("config.ignore_system_properties", false, Property.NodeScope); /** * Prepares the settings by gathering all elasticsearch system properties and setting defaults. 
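// A hedged model of the precedence this change standardizes on: with the elasticsearch.*
// prefixes gone, es.default.* values are applied first and plain es.* values overwrite
// them. Modeled with java.util collections only (imports: java.util.HashMap, java.util.Map,
// java.util.Properties); the real implementation goes through Settings.Builder#putProperties
// as shown in the hunk below.
static Map<String, String> settingsFromSystemProperties(Properties sys) {
    Map<String, String> settings = new HashMap<>();
    for (String key : sys.stringPropertyNames()) { // defaults first
        if (key.startsWith("es.default.")) {
            settings.put(key.substring("es.default.".length()), sys.getProperty(key));
        }
    }
    for (String key : sys.stringPropertyNames()) { // explicit es.* values win
        if (key.startsWith("es.") && key.startsWith("es.default.") == false) {
            settings.put(key.substring("es.".length()), sys.getProperty(key));
        }
    }
    return settings;
}
// e.g. -Des.default.node.name=fallback -Des.node.name=explicit yields node.name=explicit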
@@ -124,13 +126,9 @@ public class InternalSettingsPreparer { output.put(input); if (useSystemProperties(input)) { if (loadDefaults) { - for (String prefix : PROPERTY_DEFAULTS_PREFIXES) { - output.putProperties(prefix, BootstrapInfo.getSystemProperties()); - } - } - for (String prefix : PROPERTY_PREFIXES) { - output.putProperties(prefix, BootstrapInfo.getSystemProperties(), PROPERTY_DEFAULTS_PREFIXES); + output.putProperties(PROPERTY_DEFAULTS_PREFIX, BootstrapInfo.getSystemProperties()); } + output.putProperties(PROPERTY_PREFIX, BootstrapInfo.getSystemProperties(), PROPERTY_DEFAULTS_PREFIX); } output.replacePropertyPlaceholders(); } diff --git a/core/src/main/java/org/elasticsearch/node/service/NodeService.java b/core/src/main/java/org/elasticsearch/node/service/NodeService.java index 88b2fe48868..cb11fc02443 100644 --- a/core/src/main/java/org/elasticsearch/node/service/NodeService.java +++ b/core/src/main/java/org/elasticsearch/node/service/NodeService.java @@ -24,7 +24,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; -import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -90,6 +90,7 @@ public class NodeService extends AbstractComponent implements Closeable { this.ingestService = new IngestService(settings, threadPool, processorsRegistryBuilder); this.settingsFilter = settingsFilter; clusterService.add(ingestService.getPipelineStore()); + clusterService.add(ingestService.getPipelineExecutionService()); } // can not use constructor injection or there will be a circular dependency @@ -165,13 +166,14 @@ public class NodeService extends AbstractComponent implements Closeable { httpServer == null ? null : httpServer.stats(), circuitBreakerService.stats(), scriptService.stats(), - discovery.stats() + discovery.stats(), + ingestService.getPipelineExecutionService().stats() ); } public NodeStats stats(CommonStatsFlags indices, boolean os, boolean process, boolean jvm, boolean threadPool, boolean fs, boolean transport, boolean http, boolean circuitBreaker, - boolean script, boolean discoveryStats) { + boolean script, boolean discoveryStats, boolean ingest) { // for indices stats we want to include previous allocated shards stats as well (it will // only be applied to the sensible ones to use, like refresh/merge/flush/indexing stats) return new NodeStats(discovery.localNode(), System.currentTimeMillis(), @@ -185,7 +187,8 @@ public class NodeService extends AbstractComponent implements Closeable { http ? (httpServer == null ? null : httpServer.stats()) : null, circuitBreaker ? circuitBreakerService.stats() : null, script ? scriptService.stats() : null, - discoveryStats ? discovery.stats() : null + discoveryStats ? discovery.stats() : null, + ingest ? 
ingestService.getPipelineExecutionService().stats() : null ); } diff --git a/core/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java b/core/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java deleted file mode 100644 index 9d091a4c0bd..00000000000 --- a/core/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - - -package org.elasticsearch.percolator; - -import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.LeafReader; -import org.apache.lucene.index.MultiReader; -import org.apache.lucene.index.SlowCompositeReaderWrapper; -import org.apache.lucene.index.memory.MemoryIndex; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.Collector; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.Weight; -import org.apache.lucene.util.CloseableThreadLocal; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.ParseContext; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.internal.UidFieldMapper; - -import java.io.IOException; -import java.util.List; - - -/** - * Implementation of {@link PercolatorIndex} that can hold multiple Lucene documents by - * opening multiple {@link MemoryIndex} based IndexReaders and wrapping them via a single top level reader. 
- */ -class MultiDocumentPercolatorIndex implements PercolatorIndex { - - private final CloseableThreadLocal cache; - - MultiDocumentPercolatorIndex(CloseableThreadLocal cache) { - this.cache = cache; - } - - @Override - public void prepare(PercolateContext context, ParsedDocument parsedDocument) { - IndexReader[] memoryIndices = new IndexReader[parsedDocument.docs().size()]; - List docs = parsedDocument.docs(); - int rootDocIndex = docs.size() - 1; - assert rootDocIndex > 0; - MemoryIndex rootDocMemoryIndex = null; - for (int i = 0; i < docs.size(); i++) { - ParseContext.Document d = docs.get(i); - MemoryIndex memoryIndex; - if (rootDocIndex == i) { - // the last doc is always the rootDoc, since that is usually the biggest document it make sense - // to reuse the MemoryIndex it uses - memoryIndex = rootDocMemoryIndex = cache.get(); - } else { - memoryIndex = new MemoryIndex(true); - } - memoryIndices[i] = indexDoc(d, memoryIndex, context, parsedDocument).createSearcher().getIndexReader(); - } - try { - MultiReader mReader = new MultiReader(memoryIndices, true); - LeafReader slowReader = SlowCompositeReaderWrapper.wrap(mReader); - final IndexSearcher slowSearcher = new IndexSearcher(slowReader) { - - @Override - public Weight createNormalizedWeight(Query query, boolean needsScores) throws IOException { - BooleanQuery.Builder bq = new BooleanQuery.Builder(); - bq.add(query, BooleanClause.Occur.MUST); - bq.add(Queries.newNestedFilter(), BooleanClause.Occur.MUST_NOT); - return super.createNormalizedWeight(bq.build(), needsScores); - } - - }; - slowSearcher.setQueryCache(null); - DocSearcher docSearcher = new DocSearcher(slowSearcher, rootDocMemoryIndex); - context.initialize(docSearcher, parsedDocument); - } catch (IOException e) { - throw new ElasticsearchException("Failed to create index for percolator with nested document ", e); - } - } - - MemoryIndex indexDoc(ParseContext.Document d, MemoryIndex memoryIndex, PercolateContext context, ParsedDocument parsedDocument) { - for (IndexableField field : d.getFields()) { - Analyzer analyzer = context.analysisService().defaultIndexAnalyzer(); - DocumentMapper documentMapper = context.mapperService().documentMapper(parsedDocument.type()); - if (documentMapper != null && documentMapper.mappers().getMapper(field.name()) != null) { - analyzer = documentMapper.mappers().indexAnalyzer(); - } - if (field.fieldType().indexOptions() == IndexOptions.NONE && field.name().equals(UidFieldMapper.NAME)) { - continue; - } - try { - // TODO: instead of passing null here, we can have a CTL> and pass previous, - // like the indexer does - try (TokenStream tokenStream = field.tokenStream(analyzer, null)) { - if (tokenStream != null) { - memoryIndex.addField(field.name(), tokenStream, field.boost()); - } - } - } catch (IOException e) { - throw new ElasticsearchException("Failed to create token stream", e); - } - } - return memoryIndex; - } - - private class DocSearcher extends Engine.Searcher { - - private final MemoryIndex rootDocMemoryIndex; - - private DocSearcher(IndexSearcher searcher, MemoryIndex rootDocMemoryIndex) { - super("percolate", searcher); - this.rootDocMemoryIndex = rootDocMemoryIndex; - } - - @Override - public void close() { - try { - this.reader().close(); - rootDocMemoryIndex.reset(); - } catch (IOException e) { - throw new ElasticsearchException("failed to close IndexReader in percolator with nested doc", e); - } - } - - } -} diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java 
b/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java deleted file mode 100644 index f73c8f31a07..00000000000 --- a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java +++ /dev/null @@ -1,691 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.percolator; - - -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.Collector; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.FieldDoc; -import org.apache.lucene.util.Counter; -import org.elasticsearch.action.percolate.PercolateShardRequest; -import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.cache.recycler.PageCacheRecycler; -import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.text.Text; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.analysis.AnalysisService; -import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.fielddata.IndexFieldDataService; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.object.ObjectMapper; -import org.elasticsearch.index.query.ParsedQuery; -import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.SearchHitField; -import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.aggregations.SearchContextAggregations; -import org.elasticsearch.search.dfs.DfsSearchResult; -import org.elasticsearch.search.fetch.FetchPhase; -import org.elasticsearch.search.fetch.FetchSearchResult; -import org.elasticsearch.search.fetch.FetchSubPhase; -import org.elasticsearch.search.fetch.FetchSubPhaseContext; -import org.elasticsearch.search.fetch.innerhits.InnerHitsContext; -import org.elasticsearch.search.fetch.script.ScriptFieldsContext; -import org.elasticsearch.search.fetch.source.FetchSourceContext; -import org.elasticsearch.search.highlight.SearchContextHighlight; -import org.elasticsearch.search.internal.ContextIndexSearcher; -import org.elasticsearch.search.internal.InternalSearchHit; -import org.elasticsearch.search.internal.InternalSearchHitField; -import 
org.elasticsearch.search.internal.ScrollContext; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.internal.ShardSearchRequest; -import org.elasticsearch.search.lookup.LeafSearchLookup; -import org.elasticsearch.search.lookup.SearchLookup; -import org.elasticsearch.search.profile.Profilers; -import org.elasticsearch.search.query.QuerySearchResult; -import org.elasticsearch.search.rescore.RescoreSearchContext; -import org.elasticsearch.search.suggest.SuggestionSearchContext; - -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - */ -public class PercolateContext extends SearchContext { - - private int size = 10; - private boolean trackScores; - - private final SearchShardTarget searchShardTarget; - private final IndexService indexService; - private final IndexFieldDataService fieldDataService; - private final IndexShard indexShard; - private final PageCacheRecycler pageCacheRecycler; - private final BigArrays bigArrays; - private final ScriptService scriptService; - private final MapperService mapperService; - private final int numberOfShards; - private final Query aliasFilter; - private final long originNanoTime = System.nanoTime(); - private final long startTime; - private final boolean onlyCount; - private Engine.Searcher docSearcher; - private Engine.Searcher engineSearcher; - private ContextIndexSearcher searcher; - - private SearchContextHighlight highlight; - private ParsedQuery parsedQuery; - private Query query; - private Query percolateQuery; - private FetchSubPhase.HitContext hitContext; - private SearchContextAggregations aggregations; - private QuerySearchResult querySearchResult; - private Sort sort; - private final Map<String, FetchSubPhaseContext> subPhaseContexts = new HashMap<>(); - private final QueryShardContext queryShardContext; - private final Map<Class<?>, Collector> queryCollectors = new HashMap<>(); - private SearchLookup searchLookup; - private final FetchPhase fetchPhase; - - public PercolateContext(PercolateShardRequest request, SearchShardTarget searchShardTarget, IndexShard indexShard, - IndexService indexService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ScriptService scriptService, - Query aliasFilter, ParseFieldMatcher parseFieldMatcher, FetchPhase fetchPhase) { - super(parseFieldMatcher); - this.indexShard = indexShard; - this.indexService = indexService; - this.fetchPhase = fetchPhase; - this.fieldDataService = indexService.fieldData(); - this.mapperService = indexService.mapperService(); - this.searchShardTarget = searchShardTarget; - this.pageCacheRecycler = pageCacheRecycler; - this.bigArrays = bigArrays.withCircuitBreaking(); - this.querySearchResult = new QuerySearchResult(0, searchShardTarget); - this.engineSearcher = indexShard.acquireSearcher("percolate"); - this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy()); - this.scriptService = scriptService; - this.numberOfShards = request.getNumberOfShards(); - this.aliasFilter = aliasFilter; - this.startTime = request.getStartTime(); - this.onlyCount = request.onlyCount(); - queryShardContext = indexService.newQueryShardContext(); - queryShardContext.setTypes(request.documentType()); - } - - // for testing: - PercolateContext(PercolateShardRequest request, SearchShardTarget searchShardTarget, MapperService mapperService, QueryShardContext queryShardContext) { - super(null); - this.searchShardTarget = searchShardTarget; -
this.mapperService = mapperService; - this.indexService = null; - this.indexShard = null; - this.fieldDataService = null; - this.pageCacheRecycler = null; - this.bigArrays = null; - this.scriptService = null; - this.aliasFilter = null; - this.startTime = 0; - this.numberOfShards = 0; - this.onlyCount = true; - this.queryShardContext = queryShardContext; - this.fetchPhase = null; - } - - public IndexSearcher docSearcher() { - return docSearcher.searcher(); - } - - public void initialize(Engine.Searcher docSearcher, ParsedDocument parsedDocument) { - this.docSearcher = docSearcher; - IndexReader indexReader = docSearcher.reader(); - LeafReaderContext atomicReaderContext = indexReader.leaves().get(0); - this.searchLookup = new SearchLookup(mapperService(), fieldData(), queryShardContext.getTypes()); - LeafSearchLookup leafLookup = searchLookup.getLeafSearchLookup(atomicReaderContext); - leafLookup.setDocument(0); - leafLookup.source().setSource(parsedDocument.source()); - - Map fields = new HashMap<>(); - for (IndexableField field : parsedDocument.rootDoc().getFields()) { - fields.put(field.name(), new InternalSearchHitField(field.name(), Collections.emptyList())); - } - hitContext().reset( - new InternalSearchHit(0, "unknown", new Text(parsedDocument.type()), fields), - atomicReaderContext, 0, docSearcher.searcher() - ); - } - - @Override - public IndexShard indexShard() { - return indexShard; - } - - public IndexService indexService() { - return indexService; - } - - public Query percolateQuery() { - return percolateQuery; - } - - public void percolateQuery(Query percolateQuery) { - this.percolateQuery = percolateQuery; - } - - public FetchSubPhase.HitContext hitContext() { - if (hitContext == null) { - hitContext = new FetchSubPhase.HitContext(); - } - return hitContext; - } - - public boolean isOnlyCount() { - return onlyCount; - } - - public Query percolatorTypeFilter(){ - return indexService().mapperService().documentMapper(PercolatorService.TYPE_NAME).typeFilter(); - } - - @Override - public SearchContextHighlight highlight() { - return highlight; - } - - @Override - public void highlight(SearchContextHighlight highlight) { - if (highlight != null) { - // Enforce highlighting by source, because MemoryIndex doesn't support stored fields. 
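The comment above captures a real Lucene constraint: a MemoryIndex holds only an inverted index, never stored fields, so any highlighter has to be fed the original `_source` instead. A minimal, self-contained sketch of that constraint (plain Lucene; the field name and text are invented) before the deleted line that follows flips highlighting into source mode:

```java
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.IndexSearcher;

public class MemoryIndexHasNoStoredFields {
    public static void main(String[] args) throws Exception {
        // A MemoryIndex is populated from analyzed tokens only; nothing is stored.
        MemoryIndex mi = new MemoryIndex(true);
        mi.addField("body", "the quick brown fox", new StandardAnalyzer());
        IndexSearcher searcher = mi.createSearcher();
        // The single in-memory document has no stored field values, so a
        // highlighter cannot read "body" back; it must be handed the source.
        System.out.println(searcher.doc(0).getFields()); // prints an empty list
    }
}
```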
- highlight.globalForceSource(true); - } - this.highlight = highlight; - } - - @Override - public SearchShardTarget shardTarget() { - return searchShardTarget; - } - - @Override - public SearchLookup lookup() { - // we cache this since it's really just a single document lookup - check the init method for details - assert searchLookup != null : "context is not initialized"; - assert Arrays.equals(searchLookup.doc().getTypes(), getQueryShardContext().getTypes()) : "types mismatch - can't return lookup"; - return this.searchLookup; - } - - @Override - protected void doClose() { - Releasables.close(engineSearcher, docSearcher); - } - - @Override - public MapperService mapperService() { - return mapperService; - } - - @Override - public SearchContext parsedQuery(ParsedQuery query) { - this.parsedQuery = query; - this.query = query.query(); - return this; - } - - @Override - public ParsedQuery parsedQuery() { - return parsedQuery; - } - - @Override - public Query query() { - return query; - } - - @Override - public IndexFieldDataService fieldData() { - return fieldDataService; - } - - @Override - public SearchContextAggregations aggregations() { - return aggregations; - } - - @Override - public SearchContext aggregations(SearchContextAggregations aggregations) { - this.aggregations = aggregations; - return this; - } - - @Override - public <SubPhaseContext extends FetchSubPhaseContext> SubPhaseContext getFetchSubPhaseContext(FetchSubPhase.ContextFactory<SubPhaseContext> contextFactory) { - String subPhaseName = contextFactory.getName(); - if (subPhaseContexts.get(subPhaseName) == null) { - subPhaseContexts.put(subPhaseName, contextFactory.newContextInstance()); - } - return (SubPhaseContext) subPhaseContexts.get(subPhaseName); - } - - // Unused: - @Override - public void preProcess() { - throw new UnsupportedOperationException(); - } - - @Override - public Query searchFilter(String[] types) { - return aliasFilter(); - } - - @Override - public long id() { - throw new UnsupportedOperationException(); - } - - @Override - public String source() { - throw new UnsupportedOperationException(); - } - - @Override - public ShardSearchRequest request() { - throw new UnsupportedOperationException(); - } - - @Override - public SearchType searchType() { - throw new UnsupportedOperationException(); - } - - @Override - public SearchContext searchType(SearchType searchType) { - throw new UnsupportedOperationException(); - } - - @Override - public int numberOfShards() { - return numberOfShards; - } - - @Override - public float queryBoost() { - throw new UnsupportedOperationException(); - } - - @Override - public SearchContext queryBoost(float queryBoost) { - throw new UnsupportedOperationException(); - } - - @Override - public long getOriginNanoTime() { - return originNanoTime; - } - - @Override - protected long nowInMillisImpl() { - return startTime; - } - - @Override - public ScrollContext scrollContext() { - throw new UnsupportedOperationException(); - } - - @Override - public SearchContext scrollContext(ScrollContext scroll) { - throw new UnsupportedOperationException(); - } - - @Override - public SuggestionSearchContext suggest() { - throw new UnsupportedOperationException(); - } - - @Override - public void suggest(SuggestionSearchContext suggest) { - throw new UnsupportedOperationException(); - } - - @Override - public List<RescoreSearchContext> rescore() { - throw new UnsupportedOperationException(); - } - - @Override - public void addRescore(RescoreSearchContext rescore) { - throw new UnsupportedOperationException(); - } - - @Override - public boolean hasScriptFields() { - throw new
UnsupportedOperationException(); - } - - @Override - public ScriptFieldsContext scriptFields() { - throw new UnsupportedOperationException(); - } - - @Override - public boolean sourceRequested() { - throw new UnsupportedOperationException(); - } - - @Override - public boolean hasFetchSourceContext() { - throw new UnsupportedOperationException(); - } - - @Override - public FetchSourceContext fetchSourceContext() { - throw new UnsupportedOperationException(); - } - - @Override - public SearchContext fetchSourceContext(FetchSourceContext fetchSourceContext) { - throw new UnsupportedOperationException(); - } - - @Override - public ContextIndexSearcher searcher() { - return searcher; - } - - @Override - public AnalysisService analysisService() { - return indexService.analysisService(); - } - - @Override - public SimilarityService similarityService() { - return indexService.similarityService(); - } - - @Override - public ScriptService scriptService() { - return scriptService; - } - - @Override - public PageCacheRecycler pageCacheRecycler() { - return pageCacheRecycler; - } - - @Override - public BigArrays bigArrays() { - return bigArrays; - } - - @Override - public BitsetFilterCache bitsetFilterCache() { - return indexService.cache().bitsetFilterCache(); - } - - @Override - public long timeoutInMillis() { - return -1; - } - - @Override - public void timeoutInMillis(long timeoutInMillis) { - throw new UnsupportedOperationException(); - } - - @Override - public int terminateAfter() { - return DEFAULT_TERMINATE_AFTER; - } - - @Override - public void terminateAfter(int terminateAfter) { - throw new UnsupportedOperationException(); - } - - @Override - public SearchContext minimumScore(float minimumScore) { - throw new UnsupportedOperationException(); - } - - @Override - public Float minimumScore() { - return null; - } - - @Override - public SearchContext sort(Sort sort) { - this.sort = sort; - return this; - } - - @Override - public Sort sort() { - return sort; - } - - @Override - public SearchContext trackScores(boolean trackScores) { - this.trackScores = trackScores; - return this; - } - - @Override - public boolean trackScores() { - return trackScores; - } - - @Override - public SearchContext searchAfter(FieldDoc searchAfter) { - throw new UnsupportedOperationException(); - } - - @Override - public FieldDoc searchAfter() { - return null; - } - - @Override - public SearchContext parsedPostFilter(ParsedQuery postFilter) { - throw new UnsupportedOperationException(); - } - - @Override - public ParsedQuery parsedPostFilter() { - return null; - } - - @Override - public Query aliasFilter() { - return aliasFilter; - } - - @Override - public int from() { - return 0; - } - - @Override - public SearchContext from(int from) { - throw new UnsupportedOperationException(); - } - - @Override - public int size() { - return size; - } - - @Override - public SearchContext size(int size) { - this.size = size; - return this; - } - - @Override - public boolean hasFieldNames() { - throw new UnsupportedOperationException(); - } - - @Override - public List fieldNames() { - throw new UnsupportedOperationException(); - } - - @Override - public void emptyFieldNames() { - throw new UnsupportedOperationException(); - } - - @Override - public boolean explain() { - throw new UnsupportedOperationException(); - } - - @Override - public void explain(boolean explain) { - throw new UnsupportedOperationException(); - } - - @Override - public List groupStats() { - throw new UnsupportedOperationException(); - } - - @Override - public 
void groupStats(List<String> groupStats) { - throw new UnsupportedOperationException(); - } - - @Override - public boolean version() { - throw new UnsupportedOperationException(); - } - - @Override - public void version(boolean version) { - throw new UnsupportedOperationException(); - } - - @Override - public int[] docIdsToLoad() { - throw new UnsupportedOperationException(); - } - - @Override - public int docIdsToLoadFrom() { - throw new UnsupportedOperationException(); - } - - @Override - public int docIdsToLoadSize() { - throw new UnsupportedOperationException(); - } - - @Override - public SearchContext docIdsToLoad(int[] docIdsToLoad, int docsIdsToLoadFrom, int docsIdsToLoadSize) { - throw new UnsupportedOperationException(); - } - - @Override - public void accessed(long accessTime) { - throw new UnsupportedOperationException(); - } - - @Override - public long lastAccessTime() { - throw new UnsupportedOperationException(); - } - - @Override - public long keepAlive() { - throw new UnsupportedOperationException(); - } - - @Override - public void keepAlive(long keepAlive) { - throw new UnsupportedOperationException(); - } - - @Override - public DfsSearchResult dfsResult() { - throw new UnsupportedOperationException(); - } - - @Override - public QuerySearchResult queryResult() { - return querySearchResult; - } - - @Override - public FetchSearchResult fetchResult() { - throw new UnsupportedOperationException(); - } - - @Override - public FetchPhase fetchPhase() { - return fetchPhase; - } - - @Override - public MappedFieldType smartNameFieldType(String name) { - return mapperService().fullName(name); - } - - @Override - public ObjectMapper getObjectMapper(String name) { - throw new UnsupportedOperationException(); - } - - @Override - public Counter timeEstimateCounter() { - throw new UnsupportedOperationException(); - } - - @Override - public InnerHitsContext innerHits() { - throw new UnsupportedOperationException(); - } - - @Override - public Map<Class<?>, Collector> queryCollectors() { - return queryCollectors; - } - - @Override - public QueryShardContext getQueryShardContext() { - return queryShardContext; - } - - @Override - public Profilers getProfilers() { - throw new UnsupportedOperationException(); - } -} diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolateDocumentParser.java b/core/src/main/java/org/elasticsearch/percolator/PercolateDocumentParser.java deleted file mode 100644 index 50db3cecaa6..00000000000 --- a/core/src/main/java/org/elasticsearch/percolator/PercolateDocumentParser.java +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
- */ - -package org.elasticsearch.percolator; - -import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.Query; -import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.action.percolate.PercolateShardRequest; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.mapper.DocumentMapperForType; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.search.SearchParseElement; -import org.elasticsearch.search.aggregations.AggregationPhase; -import org.elasticsearch.search.highlight.HighlightPhase; -import org.elasticsearch.search.sort.SortParseElement; - -import java.util.Map; - -import static org.elasticsearch.index.mapper.SourceToParse.source; - -public class PercolateDocumentParser { - - private final HighlightPhase highlightPhase; - private final SortParseElement sortParseElement; - private final AggregationPhase aggregationPhase; - - @Inject - public PercolateDocumentParser(HighlightPhase highlightPhase, SortParseElement sortParseElement, - AggregationPhase aggregationPhase) { - this.highlightPhase = highlightPhase; - this.sortParseElement = sortParseElement; - this.aggregationPhase = aggregationPhase; - } - - public ParsedDocument parse(final PercolateShardRequest request, final PercolateContext context, final MapperService mapperService) { - BytesReference source = request.source(); - if (source == null || source.length() == 0) { - if (request.docSource() != null && request.docSource().length() != 0) { - return parseFetchedDoc(context, request.docSource(), mapperService, request.shardId().getIndexName(), request.documentType()); - } else { - return null; - } - } - - // TODO: combine all feature parse elements into one map - Map<String, ? extends SearchParseElement> hlElements = highlightPhase.parseElements(); - Map<String, ? extends SearchParseElement> aggregationElements = aggregationPhase.parseElements(); - final QueryShardContext queryShardContext = context.getQueryShardContext(); - ParsedDocument doc = null; - // Some queries (e.g. the function_score query with decay functions) rely on a SearchContext being set: - // We switch types because this context needs to be in the context of the percolate queries in the shard and - // not the in memory percolate doc - final String[] previousTypes = queryShardContext.getTypes(); - queryShardContext.setTypes(PercolatorService.TYPE_NAME); - try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) { - String currentFieldName = null; - XContentParser.Token token; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - // we need to check the "doc" here, so the next token will be START_OBJECT which is - // the actual document starting - if ("doc".equals(currentFieldName)) { - if (doc != null) { - throw new ElasticsearchParseException("Either specify doc or get, not both"); - } - - DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(request.documentType()); - String index = context.shardTarget().index(); - doc = docMapper.getDocumentMapper().parse(source(parser).index(index).type(request.documentType()).id("_id_for_percolate_api")); - if (docMapper.getMapping() != null) { - doc.addDynamicMappingsUpdate(docMapper.getMapping()); - } - // the document parsing exits the "doc" object, so we need to set the new current field. - currentFieldName = parser.currentName(); - } - } else if (token == XContentParser.Token.START_OBJECT) { - SearchParseElement element = hlElements.get(currentFieldName); - if (element == null) { - element = aggregationElements.get(currentFieldName); - } - - if ("query".equals(currentFieldName)) { - if (context.percolateQuery() != null) { - throw new ElasticsearchParseException("Either specify query or filter, not both"); - } - context.percolateQuery(queryShardContext.parse(parser).query()); - } else if ("filter".equals(currentFieldName)) { - if (context.percolateQuery() != null) { - throw new ElasticsearchParseException("Either specify query or filter, not both"); - } - Query filter = queryShardContext.parseInnerFilter(parser).query(); - context.percolateQuery(new ConstantScoreQuery(filter)); - } else if ("sort".equals(currentFieldName)) { - parseSort(parser, context); - } else if (element != null) { - element.parse(parser, context); - } - } else if (token == XContentParser.Token.START_ARRAY) { - if ("sort".equals(currentFieldName)) { - parseSort(parser, context); - } - } else if (token == null) { - break; - } else if (token.isValue()) { - if ("size".equals(currentFieldName)) { - context.size(parser.intValue()); - if (context.size() < 0) { - throw new ElasticsearchParseException("size is set to [{}] and is expected to be higher or equal to 0", context.size()); - } - } else if ("sort".equals(currentFieldName)) { - parseSort(parser, context); - } else if ("track_scores".equals(currentFieldName) || "trackScores".equals(currentFieldName)) { - context.trackScores(parser.booleanValue()); - } - } - } - - // We need to get the actual source from the request body for highlighting, so parse the request body again - // and only get the doc source.
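Before the highlighting re-parse below, it helps to see the overall shape of the body this parser walks. A hypothetical percolate request assembled with XContentBuilder; `doc`, `query`, `size`, and `track_scores` are the keys the branches above dispatch on, while the field names and values are invented:

```java
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class PercolateBodySketch {
    public static void main(String[] args) throws Exception {
        XContentBuilder body = XContentFactory.jsonBuilder()
            .startObject()
                // the document to percolate; parsed via documentMapperWithAutoCreate
                .startObject("doc").field("message", "a new bonsai tree").endObject()
                // optional query restricting which registered percolator queries run
                .startObject("query")
                    .startObject("term").field("field1", "value1").endObject()
                .endObject()
                .field("size", 10)           // max matches to return
                .field("track_scores", true) // keep real scores instead of NO_SCORE
            .endObject();
        System.out.println(body.string());
    }
}
```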
- if (context.highlight() != null) { - parser.close(); - currentFieldName = null; - try (XContentParser parserForHighlighter = XContentFactory.xContent(source).createParser(source)) { - token = parserForHighlighter.nextToken(); - assert token == XContentParser.Token.START_OBJECT; - while ((token = parserForHighlighter.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parserForHighlighter.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - if ("doc".equals(currentFieldName)) { - BytesStreamOutput bStream = new BytesStreamOutput(); - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.SMILE, bStream); - builder.copyCurrentStructure(parserForHighlighter); - builder.close(); - doc.setSource(bStream.bytes()); - break; - } else { - parserForHighlighter.skipChildren(); - } - } else if (token == null) { - break; - } - } - } - } - - } catch (Throwable e) { - throw new ElasticsearchParseException("failed to parse request", e); - } finally { - queryShardContext.setTypes(previousTypes); - } - - if (request.docSource() != null && request.docSource().length() != 0) { - if (doc != null) { - throw new IllegalArgumentException("Can't specify the document to percolate in the source of the request and as document id"); - } - - doc = parseFetchedDoc(context, request.docSource(), mapperService, request.shardId().getIndexName(), request.documentType()); - } - - if (doc == null) { - throw new IllegalArgumentException("Nothing to percolate"); - } - - return doc; - } - - private void parseSort(XContentParser parser, PercolateContext context) throws Exception { - context.trackScores(true); - sortParseElement.parse(parser, context); - // null, means default sorting by relevancy - if (context.sort() != null) { - throw new ElasticsearchParseException("Only _score desc is supported"); - } - } - - private ParsedDocument parseFetchedDoc(PercolateContext context, BytesReference fetchedDoc, MapperService mapperService, String index, String type) { - DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(type); - ParsedDocument doc = docMapper.getDocumentMapper().parse(source(fetchedDoc).index(index).type(type).id("_id_for_percolate_api")); - if (doc == null) { - throw new ElasticsearchParseException("No doc to percolate in the request"); - } - if (context.highlight() != null) { - doc.setSource(fetchedDoc); - } - return doc; - } - -} diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolateException.java b/core/src/main/java/org/elasticsearch/percolator/PercolateException.java deleted file mode 100644 index 81a708a75ec..00000000000 --- a/core/src/main/java/org/elasticsearch/percolator/PercolateException.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.percolator; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchWrapperException; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.shard.ShardId; - -import java.io.IOException; -import java.util.Objects; - -/** - * Exception during percolating document(s) at runtime. - */ -public class PercolateException extends ElasticsearchException implements ElasticsearchWrapperException { - - private final ShardId shardId; - - public PercolateException(ShardId shardId, String msg, Throwable cause) { - super(msg, cause); - Objects.requireNonNull(shardId, "shardId must not be null"); - this.shardId = shardId; - } - - public ShardId getShardId() { - return shardId; - } - - public PercolateException(StreamInput in) throws IOException{ - super(in); - shardId = ShardId.readShardId(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - shardId.writeTo(out); - } -} diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java deleted file mode 100644 index 1160aec969b..00000000000 --- a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java +++ /dev/null @@ -1,376 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.percolator; - - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.ReaderUtil; -import org.apache.lucene.index.memory.ExtendedMemoryIndex; -import org.apache.lucene.index.memory.MemoryIndex; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.MultiCollector; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TopScoreDocCollector; -import org.apache.lucene.search.TotalHitCountCollector; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.CloseableThreadLocal; -import org.elasticsearch.Version; -import org.elasticsearch.action.percolate.PercolateResponse; -import org.elasticsearch.action.percolate.PercolateShardRequest; -import org.elasticsearch.action.percolate.PercolateShardResponse; -import org.elasticsearch.cache.recycler.PageCacheRecycler; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.text.Text; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.internal.UidFieldMapper; -import org.elasticsearch.index.percolator.PercolatorFieldMapper; -import org.elasticsearch.index.percolator.PercolatorQueriesRegistry; -import org.elasticsearch.index.query.ParsedQuery; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.aggregations.AggregationPhase; -import org.elasticsearch.search.aggregations.Aggregator; -import org.elasticsearch.search.aggregations.BucketCollector; -import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregator; -import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; -import org.elasticsearch.search.aggregations.support.AggregationContext; -import org.elasticsearch.search.fetch.FetchPhase; -import org.elasticsearch.search.highlight.HighlightField; -import org.elasticsearch.search.highlight.HighlightPhase; -import org.elasticsearch.search.internal.SearchContext; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; -import java.util.stream.StreamSupport; - -import static org.apache.lucene.search.BooleanClause.Occur.FILTER; -import static org.apache.lucene.search.BooleanClause.Occur.MUST; - -public class 
PercolatorService extends AbstractComponent implements Releasable { - - public final static float NO_SCORE = Float.NEGATIVE_INFINITY; - public final static String TYPE_NAME = ".percolator"; - - private final BigArrays bigArrays; - private final ScriptService scriptService; - private final IndicesService indicesService; - private final ClusterService clusterService; - private final HighlightPhase highlightPhase; - private final AggregationPhase aggregationPhase; - private final PageCacheRecycler pageCacheRecycler; - private final CloseableThreadLocal cache; - private final IndexNameExpressionResolver indexNameExpressionResolver; - private final PercolateDocumentParser percolateDocumentParser; - - private final PercolatorIndex single; - private final PercolatorIndex multi; - private final ParseFieldMatcher parseFieldMatcher; - private final FetchPhase fetchPhase; - - @Inject - public PercolatorService(Settings settings, IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService, - PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, - HighlightPhase highlightPhase, ClusterService clusterService, - AggregationPhase aggregationPhase, ScriptService scriptService, - PercolateDocumentParser percolateDocumentParser, FetchPhase fetchPhase) { - super(settings); - this.indexNameExpressionResolver = indexNameExpressionResolver; - this.percolateDocumentParser = percolateDocumentParser; - this.fetchPhase = fetchPhase; - this.parseFieldMatcher = new ParseFieldMatcher(settings); - this.indicesService = indicesService; - this.pageCacheRecycler = pageCacheRecycler; - this.bigArrays = bigArrays; - this.clusterService = clusterService; - this.scriptService = scriptService; - this.aggregationPhase = aggregationPhase; - this.highlightPhase = highlightPhase; - - final long maxReuseBytes = settings.getAsBytesSize("indices.memory.memory_index.size_per_thread", new ByteSizeValue(1, ByteSizeUnit.MB)).bytes(); - cache = new CloseableThreadLocal() { - @Override - protected MemoryIndex initialValue() { - // TODO: should we expose payloads as an option? should offsets be turned on always? 
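The TODO above concerns payloads and offsets; the surrounding block, meanwhile, wires up the per-thread MemoryIndex that both percolator index implementations reuse, and whose `initialValue()` body continues in the deleted line below. A condensed sketch of that reuse pattern (Lucene's CloseableThreadLocal and MemoryIndex; the analyzer and field are illustrative):

```java
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.util.CloseableThreadLocal;

public class MemoryIndexReuseSketch {
    // one lazily created MemoryIndex per thread, as in the field above
    private static final CloseableThreadLocal<MemoryIndex> CACHE = new CloseableThreadLocal<MemoryIndex>() {
        @Override
        protected MemoryIndex initialValue() {
            return new MemoryIndex(true); // true: store offsets, as the code below does
        }
    };

    static void percolateOnce() throws Exception {
        MemoryIndex idx = CACHE.get();
        try {
            idx.addField("field", "some text", new StandardAnalyzer());
            // ... run the registered queries against idx.createSearcher() ...
        } finally {
            idx.reset(); // make the index reusable for the next request on this thread
        }
    }
}
```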
- return new ExtendedMemoryIndex(true, false, maxReuseBytes); - } - }; - single = new SingleDocumentPercolatorIndex(cache); - multi = new MultiDocumentPercolatorIndex(cache); - } - - public ReduceResult reduce(boolean onlyCount, List<PercolateShardResponse> shardResponses) throws IOException { - if (onlyCount) { - long finalCount = 0; - for (PercolateShardResponse shardResponse : shardResponses) { - finalCount += shardResponse.topDocs().totalHits; - } - - InternalAggregations reducedAggregations = reduceAggregations(shardResponses); - return new PercolatorService.ReduceResult(finalCount, reducedAggregations); - } else { - int requestedSize = shardResponses.get(0).requestedSize(); - TopDocs[] shardResults = new TopDocs[shardResponses.size()]; - long foundMatches = 0; - for (int i = 0; i < shardResults.length; i++) { - TopDocs shardResult = shardResponses.get(i).topDocs(); - foundMatches += shardResult.totalHits; - shardResults[i] = shardResult; - } - TopDocs merged = TopDocs.merge(requestedSize, shardResults); - PercolateResponse.Match[] matches = new PercolateResponse.Match[merged.scoreDocs.length]; - for (int i = 0; i < merged.scoreDocs.length; i++) { - ScoreDoc doc = merged.scoreDocs[i]; - PercolateShardResponse shardResponse = shardResponses.get(doc.shardIndex); - String id = shardResponse.ids().get(doc.doc); - Map<String, HighlightField> hl = shardResponse.hls().get(doc.doc); - matches[i] = new PercolateResponse.Match(new Text(shardResponse.getIndex()), new Text(id), doc.score, hl); - } - InternalAggregations reducedAggregations = reduceAggregations(shardResponses); - return new PercolatorService.ReduceResult(foundMatches, matches, reducedAggregations); - } - } - - public PercolateShardResponse percolate(PercolateShardRequest request) throws IOException { - final IndexService percolateIndexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - final IndexShard indexShard = percolateIndexService.getShard(request.shardId().id()); - indexShard.readAllowed(); // check if we can read the shard... - PercolatorQueriesRegistry percolateQueryRegistry = indexShard.percolateRegistry(); - percolateQueryRegistry.prePercolate(); - long startTime = System.nanoTime(); - - // TODO: The filteringAliases should be looked up at the coordinating node and serialized with all shard requests, - // just like is done in other APIs.
- String[] filteringAliases = indexNameExpressionResolver.filteringAliases( - clusterService.state(), - indexShard.shardId().getIndex().getName(), - request.indices() - ); - Query aliasFilter = percolateIndexService.aliasFilter(percolateIndexService.newQueryShardContext(), filteringAliases); - - SearchShardTarget searchShardTarget = new SearchShardTarget(clusterService.localNode().id(), request.shardId().getIndex(), - request.shardId().id()); - final PercolateContext context = new PercolateContext(request, searchShardTarget, indexShard, percolateIndexService, - pageCacheRecycler, bigArrays, scriptService, aliasFilter, parseFieldMatcher, fetchPhase); - SearchContext.setCurrent(context); - try { - ParsedDocument parsedDocument = percolateDocumentParser.parse(request, context, percolateIndexService.mapperService()); - if (context.searcher().getIndexReader().maxDoc() == 0) { - return new PercolateShardResponse(Lucene.EMPTY_TOP_DOCS, Collections.emptyMap(), Collections.emptyMap(), context); - } - if (context.size() < 0) { - context.size(0); - } - - // parse the source either into one MemoryIndex, if it is a single document or index multiple docs if nested - PercolatorIndex percolatorIndex; - DocumentMapper documentMapper = indexShard.mapperService().documentMapper(request.documentType()); - boolean isNested = documentMapper != null && documentMapper.hasNestedObjects(); - if (parsedDocument.docs().size() > 1) { - assert isNested; - percolatorIndex = multi; - } else { - percolatorIndex = single; - } - percolatorIndex.prepare(context, parsedDocument); - - BucketCollector aggregatorCollector = null; - if (context.aggregations() != null) { - AggregationContext aggregationContext = new AggregationContext(context); - context.aggregations().aggregationContext(aggregationContext); - Aggregator[] aggregators = context.aggregations().factories().createTopLevelAggregators(); - List<Aggregator> aggregatorCollectors = new ArrayList<>(aggregators.length); - for (int i = 0; i < aggregators.length; i++) { - if (!(aggregators[i] instanceof GlobalAggregator)) { - Aggregator aggregator = aggregators[i]; - aggregatorCollectors.add(aggregator); - } - } - context.aggregations().aggregators(aggregators); - aggregatorCollector = BucketCollector.wrap(aggregatorCollectors); - aggregatorCollector.preCollection(); - } - PercolatorQueriesRegistry queriesRegistry = indexShard.percolateRegistry(); - return doPercolate(context, queriesRegistry, aggregationPhase, aggregatorCollector, highlightPhase); - } finally { - SearchContext.removeCurrent(); - context.close(); - percolateQueryRegistry.postPercolate(System.nanoTime() - startTime); - } - } - - // moved the core percolation logic to a package protected method to make testing easier: - static PercolateShardResponse doPercolate(PercolateContext context, PercolatorQueriesRegistry queriesRegistry, AggregationPhase aggregationPhase, @Nullable BucketCollector aggregatorCollector, HighlightPhase highlightPhase) throws IOException { - PercolatorQuery.Builder builder = new PercolatorQuery.Builder(context.docSearcher(), queriesRegistry.getPercolateQueries(), context.percolatorTypeFilter()); - if (queriesRegistry.indexSettings().getSettings().getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null).onOrAfter(Version.V_5_0_0)) { - builder.extractQueryTermsQuery(PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME, PercolatorFieldMapper.UNKNOWN_QUERY_FULL_FIELD_NAME); - } - if (context.percolateQuery() != null || context.aliasFilter() != null) { - BooleanQuery.Builder bq = new BooleanQuery.Builder(); - if (context.percolateQuery() != null) { - bq.add(context.percolateQuery(), MUST); - } - if (context.aliasFilter() != null) { - bq.add(context.aliasFilter(), FILTER); - } - builder.setPercolateQuery(bq.build()); - } - PercolatorQuery percolatorQuery = builder.build(); - - if (context.isOnlyCount() || context.size() == 0) { - TotalHitCountCollector collector = new TotalHitCountCollector(); - context.searcher().search(percolatorQuery, MultiCollector.wrap(collector, aggregatorCollector)); - if (aggregatorCollector != null) { - aggregatorCollector.postCollection(); - aggregationPhase.execute(context); - } - return new PercolateShardResponse(new TopDocs(collector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0f), Collections.emptyMap(), Collections.emptyMap(), context); - } else { - int size = context.size(); - if (size > context.searcher().getIndexReader().maxDoc()) { - // prevent easy OOM if more than the total number of docs that - // exist is requested... - size = context.searcher().getIndexReader().maxDoc(); - } - TopScoreDocCollector collector = TopScoreDocCollector.create(size); - context.searcher().search(percolatorQuery, MultiCollector.wrap(collector, aggregatorCollector)); - if (aggregatorCollector != null) { - aggregatorCollector.postCollection(); - aggregationPhase.execute(context); - } - - TopDocs topDocs = collector.topDocs(); - Map<Integer, String> ids = new HashMap<>(topDocs.scoreDocs.length); - Map<Integer, Map<String, HighlightField>> hls = new HashMap<>(topDocs.scoreDocs.length); - for (ScoreDoc scoreDoc : topDocs.scoreDocs) { - if (context.trackScores() == false) { - // No sort or tracking scores was provided, so use special - // value to indicate to not show the scores: - scoreDoc.score = NO_SCORE; - } - - int segmentIdx = ReaderUtil.subIndex(scoreDoc.doc, context.searcher().getIndexReader().leaves()); - LeafReaderContext atomicReaderContext = context.searcher().getIndexReader().leaves().get(segmentIdx); - final int segmentDocId = scoreDoc.doc - atomicReaderContext.docBase; - SingleFieldsVisitor fieldsVisitor = new SingleFieldsVisitor(UidFieldMapper.NAME); - atomicReaderContext.reader().document(segmentDocId, fieldsVisitor); - String id = fieldsVisitor.uid().id(); - ids.put(scoreDoc.doc, id); - if (context.highlight() != null) { - Query query = queriesRegistry.getPercolateQueries().get(new BytesRef(id)); - context.parsedQuery(new ParsedQuery(query)); - context.hitContext().cache().clear(); - highlightPhase.hitExecute(context, context.hitContext()); - hls.put(scoreDoc.doc, context.hitContext().hit().getHighlightFields()); - } - } - return new PercolateShardResponse(topDocs, ids, hls, context); - } - } - - @Override - public void close() { - cache.close(); - } - - private InternalAggregations reduceAggregations(List<PercolateShardResponse> shardResults) { - if (shardResults.get(0).aggregations() == null) { - return null; - } - - List<InternalAggregations> aggregationsList = new ArrayList<>(shardResults.size()); - for (PercolateShardResponse shardResult : shardResults) { - aggregationsList.add(shardResult.aggregations()); - } - InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, new InternalAggregation.ReduceContext(bigArrays, scriptService)); - if (aggregations != null) { - List<SiblingPipelineAggregator> pipelineAggregators = shardResults.get(0).pipelineAggregators(); - if (pipelineAggregators != null) { - List<InternalAggregation> newAggs = StreamSupport.stream(aggregations.spliterator(), false).map((p) -> { - return (InternalAggregation) p; - }).collect(Collectors.toList()); - for (SiblingPipelineAggregator pipelineAggregator : pipelineAggregators) { - InternalAggregation newAgg =
pipelineAggregator.doReduce(new InternalAggregations(newAggs), - new InternalAggregation.ReduceContext(bigArrays, scriptService)); - newAggs.add(newAgg); - } - aggregations = new InternalAggregations(newAggs); - } - } - return aggregations; - } - - public final static class ReduceResult { - - private final long count; - private final PercolateResponse.Match[] matches; - private final InternalAggregations reducedAggregations; - - ReduceResult(long count, PercolateResponse.Match[] matches, InternalAggregations reducedAggregations) { - this.count = count; - this.matches = matches; - this.reducedAggregations = reducedAggregations; - } - - public ReduceResult(long count, InternalAggregations reducedAggregations) { - this.count = count; - this.matches = null; - this.reducedAggregations = reducedAggregations; - } - - public long count() { - return count; - } - - public PercolateResponse.Match[] matches() { - return matches; - } - - public InternalAggregations reducedAggregations() { - return reducedAggregations; - } - } - - -} diff --git a/core/src/main/java/org/elasticsearch/percolator/SingleDocumentPercolatorIndex.java b/core/src/main/java/org/elasticsearch/percolator/SingleDocumentPercolatorIndex.java deleted file mode 100644 index 1d5268e3794..00000000000 --- a/core/src/main/java/org/elasticsearch/percolator/SingleDocumentPercolatorIndex.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - - -package org.elasticsearch.percolator; - -import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.memory.MemoryIndex; -import org.apache.lucene.util.CloseableThreadLocal; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.internal.UidFieldMapper; - -import java.io.IOException; - -/** - * Implementation of {@link PercolatorIndex} that can only hold a single Lucene document - * and is optimized for that - */ -class SingleDocumentPercolatorIndex implements PercolatorIndex { - - private final CloseableThreadLocal<MemoryIndex> cache; - - SingleDocumentPercolatorIndex(CloseableThreadLocal<MemoryIndex> cache) { - this.cache = cache; - } - - @Override - public void prepare(PercolateContext context, ParsedDocument parsedDocument) { - MemoryIndex memoryIndex = cache.get(); - for (IndexableField field : parsedDocument.rootDoc().getFields()) { - Analyzer analyzer = context.analysisService().defaultIndexAnalyzer(); - DocumentMapper documentMapper = context.mapperService().documentMapper(parsedDocument.type()); - if (documentMapper != null && documentMapper.mappers().getMapper(field.name()) != null) { - analyzer = documentMapper.mappers().indexAnalyzer(); - } - if (field.fieldType().indexOptions() == IndexOptions.NONE && field.name().equals(UidFieldMapper.NAME)) { - continue; - } - try { - // TODO: instead of passing null here, we can have a CTL<Map<String, TokenStream>> and pass previous, - // like the indexer does - try (TokenStream tokenStream = field.tokenStream(analyzer, null)) { - if (tokenStream != null) { - memoryIndex.addField(field.name(), tokenStream, field.boost()); - } - } - } catch (Exception e) { - throw new ElasticsearchException("Failed to create token stream for [" + field.name() + "]", e); - } - } - context.initialize(new DocEngineSearcher(memoryIndex), parsedDocument); - } - - private class DocEngineSearcher extends Engine.Searcher { - - private final MemoryIndex memoryIndex; - - public DocEngineSearcher(MemoryIndex memoryIndex) { - super("percolate", memoryIndex.createSearcher()); - this.memoryIndex = memoryIndex; - } - - @Override - public void close() { - try { - this.reader().close(); - memoryIndex.reset(); - } catch (IOException e) { - throw new ElasticsearchException("failed to close percolator in-memory index", e); - } - } - } -} diff --git a/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java b/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java index a7d088ce214..5dab19581a3 100644 --- a/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java +++ b/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java @@ -20,9 +20,9 @@ package org.elasticsearch.plugins; public class DummyPluginInfo extends PluginInfo { - private DummyPluginInfo(String name, String description, String version, String classname, boolean isolated) { - super(name, description, version, classname, isolated); + private DummyPluginInfo(String name, String description, String version, String classname) { + super(name, description, version, classname); } - public static final DummyPluginInfo INSTANCE = new DummyPluginInfo("dummy_plugin_name", "dummy plugin description", "dummy_plugin_version", "DummyPluginName", true); + public static final DummyPluginInfo
INSTANCE = new DummyPluginInfo("dummy_plugin_name", "dummy plugin description", "dummy_plugin_version", "DummyPluginName"); } diff --git a/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index 767f6d42179..4096ad57885 100644 --- a/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -19,16 +19,18 @@ package org.elasticsearch.plugins; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; import org.apache.lucene.util.IOUtils; import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.bootstrap.JarHell; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.cli.UserError; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserError; import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import java.io.BufferedReader; @@ -44,18 +46,22 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; import java.nio.file.attribute.PosixFileAttributeView; +import java.nio.file.attribute.PosixFileAttributes; import java.nio.file.attribute.PosixFilePermission; +import java.nio.file.attribute.PosixFilePermissions; import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.List; import java.util.Locale; +import java.util.Objects; import java.util.Set; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; import static java.util.Collections.unmodifiableSet; -import static org.elasticsearch.common.cli.Terminal.Verbosity.VERBOSE; +import static org.elasticsearch.cli.Terminal.Verbosity.VERBOSE; import static org.elasticsearch.common.util.set.Sets.newHashSet; /** @@ -63,9 +69,9 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet; * * The install command takes a plugin id, which may be any of the following: *
<ul>
- * <li>An official elasticsearch plugin name</li>
- * <li>Maven coordinates to a plugin zip</li>
- * <li>A URL to a plugin zip</li>
+ * <li>An official elasticsearch plugin name</li>
+ * <li>Maven coordinates to a plugin zip</li>
+ * <li>A URL to a plugin zip</li>
 * </ul>
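Concretely, the three accepted id forms from the list above look like the following (only `analysis-icu` is a real official plugin name, taken from the OFFICIAL_PLUGINS set later in this diff; the Maven coordinates and URL are invented):

```java
class PluginIdExamples {
    static final String OFFICIAL_NAME = "analysis-icu";                         // resolved via download.elastic.co
    static final String MAVEN_COORDINATES = "org.example:example-plugin:1.0.0"; // groupId:artifactId:version
    static final String ZIP_URL = "https://example.com/example-plugin.zip";     // direct URL to a plugin zip
}
```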
    * * Plugins are packaged as zip files. Each packaged plugin must contain a @@ -74,9 +80,9 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet; * The installation process first extracts the plugin files into a temporary * directory in order to verify the plugin satisfies the following requirements: *
<ul>
- * <li>Jar hell does not exist, either between the plugin's own jars, or with elasticsearch</li>
- * <li>The plugin is not a module already provided with elasticsearch</li>
- * <li>If the plugin contains extra security permissions, the policy file is validated</li>
+ * <li>Jar hell does not exist, either between the plugin's own jars, or with elasticsearch</li>
+ * <li>The plugin is not a module already provided with elasticsearch</li>
+ * <li>If the plugin contains extra security permissions, the policy file is validated</li>
 * </ul>
    *
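The first requirement in the list above is enforced by the jarHellCheck method further down in this diff. Condensed, the check amounts to the sketch below; `JarHell.parseClassPath` and `FileSystemUtils.files` appear verbatim in the diff, while the `checkJarHell(URL[])` call is an assumption inferred from their usage:

```java
import org.elasticsearch.bootstrap.JarHell;
import org.elasticsearch.common.io.FileSystemUtils;

import java.net.URL;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class JarHellSketch {
    static void check(Path candidatePluginDir) throws Exception {
        // start from everything already on the node's classpath
        List<URL> jars = new ArrayList<>(Arrays.asList(JarHell.parseClassPath()));
        // add the candidate plugin's jars before they are copied into place
        for (Path jar : FileSystemUtils.files(candidatePluginDir, "*.jar")) {
            jars.add(jar.toUri().toURL());
        }
        // throws if any class would be present twice after installation
        JarHell.checkJarHell(jars.toArray(new URL[jars.size()]));
    }
}
```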

    * A plugin may also contain an optional {@code bin} directory which contains scripts. The @@ -88,48 +94,77 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet; * elasticsearch config directory, using the name of the plugin. If any files to be installed * already exist, they will be skipped. */ -class InstallPluginCommand extends CliTool.Command { +class InstallPluginCommand extends Command { private static final String PROPERTY_SUPPORT_STAGING_URLS = "es.plugins.staging"; // TODO: make this a resource file generated by gradle static final Set MODULES = unmodifiableSet(newHashSet( - "lang-expression", - "lang-groovy")); + "ingest-grok", + "lang-expression", + "lang-groovy", + "lang-painless", + "reindex")); // TODO: make this a resource file generated by gradle - static final Set OFFICIAL_PLUGINS = unmodifiableSet(newHashSet( - "analysis-icu", - "analysis-kuromoji", - "analysis-phonetic", - "analysis-smartcn", - "analysis-stempel", - "delete-by-query", - "discovery-azure", - "discovery-ec2", - "discovery-gce", - "lang-javascript", - "lang-painless", - "lang-python", - "mapper-attachments", - "mapper-murmur3", - "mapper-size", - "repository-azure", - "repository-hdfs", - "repository-s3", - "store-smb")); + static final Set OFFICIAL_PLUGINS = unmodifiableSet(new LinkedHashSet<>(Arrays.asList( + "analysis-icu", + "analysis-kuromoji", + "analysis-phonetic", + "analysis-smartcn", + "analysis-stempel", + "delete-by-query", + "discovery-azure", + "discovery-ec2", + "discovery-gce", + "ingest-attachment", + "ingest-geoip", + "lang-javascript", + "lang-python", + "mapper-attachments", + "mapper-murmur3", + "mapper-size", + "repository-azure", + "repository-hdfs", + "repository-s3", + "store-smb", + "xpack"))); - private final String pluginId; - private final boolean batch; + private final Environment env; + private final OptionSpec batchOption; + private final OptionSpec arguments; - InstallPluginCommand(Terminal terminal, String pluginId, boolean batch) { - super(terminal); - this.pluginId = pluginId; - this.batch = batch; + InstallPluginCommand(Environment env) { + super("Install a plugin"); + this.env = env; + this.batchOption = parser.acceptsAll(Arrays.asList("b", "batch"), + "Enable batch mode explicitly, automatic confirmation of security permission"); + this.arguments = parser.nonOptions("plugin id"); } @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { + protected void printAdditionalHelp(Terminal terminal) { + terminal.println("The following official plugins may be installed by name:"); + for (String plugin : OFFICIAL_PLUGINS) { + terminal.println(" " + plugin); + } + terminal.println(""); + } + + @Override + protected void execute(Terminal terminal, OptionSet options) throws Exception { + // TODO: in jopt-simple 5.0 we can enforce a min/max number of positional args + List args = arguments.values(options); + if (args.size() != 1) { + throw new UserError(ExitCodes.USAGE, "Must supply a single plugin id argument"); + } + String pluginId = args.get(0); + boolean isBatch = options.has(batchOption) || System.console() == null; + execute(terminal, pluginId, isBatch); + } + + // pkg private for testing + void execute(Terminal terminal, String pluginId, boolean isBatch) throws Exception { // TODO: remove this leniency!! is it needed anymore? 
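The TODO above questions the leniency of auto-creating the plugins directory, which the next hunk retains. Separately, the jopt-simple wiring introduced in this hunk (acceptsAll, nonOptions, OptionSet) follows the library's standard pattern, sketched in isolation below (option names are taken from the diff; the parsed arguments are invented):

```java
import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;

import java.util.Arrays;
import java.util.List;

public class JoptSketch {
    public static void main(String[] args) {
        OptionParser parser = new OptionParser();
        OptionSpec<Void> batch = parser.acceptsAll(Arrays.asList("b", "batch"), "batch mode");
        OptionSpec<String> pluginIds = parser.nonOptions("plugin id");

        OptionSet options = parser.parse("-b", "analysis-icu");
        boolean isBatch = options.has(batch);         // true
        List<String> ids = pluginIds.values(options); // ["analysis-icu"]
        System.out.println(isBatch + " " + ids);
    }
}
```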
if (Files.exists(env.pluginsFile()) == false) { @@ -137,24 +172,29 @@ class InstallPluginCommand extends CliTool.Command { Files.createDirectory(env.pluginsFile()); } - Path pluginZip = download(pluginId, env.tmpFile()); + Path pluginZip = download(terminal, pluginId, env.tmpFile()); Path extractedZip = unzip(pluginZip, env.pluginsFile()); - install(extractedZip, env); - - return CliTool.ExitStatus.OK; + install(terminal, isBatch, extractedZip); } /** Downloads the plugin and returns the file it was downloaded to. */ - private Path download(String pluginId, Path tmpDir) throws Exception { + private Path download(Terminal terminal, String pluginId, Path tmpDir) throws Exception { if (OFFICIAL_PLUGINS.contains(pluginId)) { final String version = Version.CURRENT.toString(); final String url; if (System.getProperty(PROPERTY_SUPPORT_STAGING_URLS, "false").equals("true")) { - url = String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/staging/%1$s-%2$s/org/elasticsearch/plugin/%3$s/%1$s/%3$s-%1$s.zip", - version, Build.CURRENT.shortHash(), pluginId); + url = String.format( + Locale.ROOT, + "https://download.elastic.co/elasticsearch/staging/%1$s-%2$s/org/elasticsearch/plugin/%3$s/%1$s/%3$s-%1$s.zip", + version, + Build.CURRENT.shortHash(), + pluginId); } else { - url = String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/%1$s/%2$s/%1$s-%2$s.zip", - pluginId, version); + url = String.format( + Locale.ROOT, + "https://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/%1$s/%2$s/%1$s-%2$s.zip", + pluginId, + version); } terminal.println("-> Downloading " + pluginId + " from elastic"); return downloadZipAndChecksum(url, tmpDir); @@ -164,7 +204,7 @@ class InstallPluginCommand extends CliTool.Command { String[] coordinates = pluginId.split(":"); if (coordinates.length == 3 && pluginId.contains("/") == false) { String mavenUrl = String.format(Locale.ROOT, "https://repo1.maven.org/maven2/%1$s/%2$s/%3$s/%2$s-%3$s.zip", - coordinates[0].replace(".", "/") /* groupId */, coordinates[1] /* artifactId */, coordinates[2] /* version */); + coordinates[0].replace(".", "/") /* groupId */, coordinates[1] /* artifactId */, coordinates[2] /* version */); terminal.println("-> Downloading " + pluginId + " from maven central"); return downloadZipAndChecksum(mavenUrl, tmpDir); } @@ -195,14 +235,14 @@ class InstallPluginCommand extends CliTool.Command { BufferedReader checksumReader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8)); expectedChecksum = checksumReader.readLine(); if (checksumReader.readLine() != null) { - throw new UserError(CliTool.ExitStatus.IO_ERROR, "Invalid checksum file at " + checksumUrl); + throw new UserError(ExitCodes.IO_ERROR, "Invalid checksum file at " + checksumUrl); } } byte[] zipbytes = Files.readAllBytes(zip); String gotChecksum = MessageDigests.toHexString(MessageDigests.sha1().digest(zipbytes)); if (expectedChecksum.equals(gotChecksum) == false) { - throw new UserError(CliTool.ExitStatus.IO_ERROR, "SHA1 mismatch, expected " + expectedChecksum + " but got " + gotChecksum); + throw new UserError(ExitCodes.IO_ERROR, "SHA1 mismatch, expected " + expectedChecksum + " but got " + gotChecksum); } return zip; @@ -210,8 +250,8 @@ class InstallPluginCommand extends CliTool.Command { private Path unzip(Path zip, Path pluginsDir) throws IOException, UserError { // unzip plugin to a staging temp dir - Path target = Files.createTempDirectory(pluginsDir, ".installing-"); - 
Files.createDirectories(target); + + final Path target = stagingDirectory(pluginsDir); boolean hasEsDir = false; // TODO: we should wrap this in a try/catch and try deleting the target dir on failure? @@ -225,7 +265,14 @@ class InstallPluginCommand extends CliTool.Command { } hasEsDir = true; Path targetFile = target.resolve(entry.getName().substring("elasticsearch/".length())); - // TODO: handle name being an absolute path + + // Using the entry name as a path can result in an entry outside of the plugin dir, either if the + // name starts with the root of the filesystem, or it is a relative entry like ../whatever. + // This check attempts to identify both cases by first normalizing the path (which removes foo/..) + // and ensuring the normalized entry is still rooted with the target plugin directory. + if (targetFile.normalize().startsWith(target) == false) { + throw new IOException("Zip contains entry name '" + entry.getName() + "' resolving outside of plugin directory"); + } // be on the safe side: do not rely on that directories are always extracted // before their children (although this makes sense, but is it guaranteed?) @@ -233,7 +280,7 @@ class InstallPluginCommand extends CliTool.Command { if (entry.isDirectory() == false) { try (OutputStream out = Files.newOutputStream(targetFile)) { int len; - while((len = zipInput.read(buffer)) >= 0) { + while ((len = zipInput.read(buffer)) >= 0) { out.write(buffer, 0, len); } } @@ -244,13 +291,46 @@ class InstallPluginCommand extends CliTool.Command { Files.delete(zip); if (hasEsDir == false) { IOUtils.rm(target); - throw new UserError(CliTool.ExitStatus.DATA_ERROR, "`elasticsearch` directory is missing in the plugin zip"); + throw new UserError(ExitCodes.DATA_ERROR, "`elasticsearch` directory is missing in the plugin zip"); } return target; } + private Path stagingDirectory(Path pluginsDir) throws IOException { + try { + Set perms = new HashSet<>(); + perms.add(PosixFilePermission.OWNER_EXECUTE); + perms.add(PosixFilePermission.OWNER_READ); + perms.add(PosixFilePermission.OWNER_WRITE); + perms.add(PosixFilePermission.GROUP_READ); + perms.add(PosixFilePermission.GROUP_EXECUTE); + perms.add(PosixFilePermission.OTHERS_READ); + perms.add(PosixFilePermission.OTHERS_EXECUTE); + return Files.createTempDirectory(pluginsDir, ".installing-", PosixFilePermissions.asFileAttribute(perms)); + } catch (IllegalArgumentException e) { + // Jimfs throws an IAE where it should throw an UOE + // remove when google/jimfs#30 is integrated into Jimfs + // and the Jimfs test dependency is upgraded to include + // this pull request + final StackTraceElement[] elements = e.getStackTrace(); + if (elements.length >= 1 && + elements[0].getClassName().equals("com.google.common.jimfs.AttributeService") && + elements[0].getMethodName().equals("setAttributeInternal")) { + return stagingDirectoryWithoutPosixPermissions(pluginsDir); + } else { + throw e; + } + } catch (UnsupportedOperationException e) { + return stagingDirectoryWithoutPosixPermissions(pluginsDir); + } + } + + private Path stagingDirectoryWithoutPosixPermissions(Path pluginsDir) throws IOException { + return Files.createTempDirectory(pluginsDir, ".installing-"); + } + /** Load information about the plugin, and verify it can be installed with no errors. 
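The normalize/startsWith guard added to the unzip loop is the standard defense against "zip slip" entries such as ../../etc/passwd. A runnable illustration of the same check in isolation (the paths are examples only):

    import java.io.IOException;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    // The guard from the unzip loop, in isolation: normalizing removes any
    // foo/.. segments, and the startsWith test rejects entries that escape
    // the staging directory.
    final class ZipSlipGuardExample {
        static Path safeResolve(Path target, String entryName) throws IOException {
            Path targetFile = target.resolve(entryName);
            if (targetFile.normalize().startsWith(target) == false) {
                throw new IOException("entry '" + entryName + "' resolves outside of " + target);
            }
            return targetFile;
        }

        public static void main(String[] args) throws IOException {
            Path root = Paths.get("/tmp/plugins/.installing-1234");      // example path only
            System.out.println(safeResolve(root, "config/plugin.yml"));  // accepted
            try {
                safeResolve(root, "../../etc/passwd");
            } catch (IOException e) {
                System.out.println("rejected: " + e.getMessage());       // traversal caught
            }
        }
    }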
*/ - private PluginInfo verify(Path pluginRoot, Environment env) throws Exception { + private PluginInfo verify(Terminal terminal, Path pluginRoot, boolean isBatch) throws Exception { // read and validate the plugin descriptor PluginInfo info = PluginInfo.readFromProperties(pluginRoot); terminal.println(VERBOSE, info.toString()); @@ -258,36 +338,30 @@ class InstallPluginCommand extends CliTool.Command { // don't let luser install plugin as a module... // they might be unavoidably in maven central and are packaged up the same way) if (MODULES.contains(info.getName())) { - throw new UserError(CliTool.ExitStatus.USAGE, "plugin '" + info.getName() + "' cannot be installed like this, it is a system module"); + throw new UserError(ExitCodes.USAGE, "plugin '" + info.getName() + "' cannot be installed like this, it is a system module"); } // check for jar hell before any copying - jarHellCheck(pluginRoot, env.pluginsFile(), info.isIsolated()); + jarHellCheck(pluginRoot, env.pluginsFile()); // read optional security policy (extra permissions) // if it exists, confirm or warn the user Path policy = pluginRoot.resolve(PluginInfo.ES_PLUGIN_POLICY); if (Files.exists(policy)) { - PluginSecurity.readPolicy(policy, terminal, env, batch); + PluginSecurity.readPolicy(policy, terminal, env, isBatch); } return info; } /** check a candidate plugin for jar hell before installing it */ - private void jarHellCheck(Path candidate, Path pluginsDir, boolean isolated) throws Exception { + void jarHellCheck(Path candidate, Path pluginsDir) throws Exception { // create list of current jars in classpath final List jars = new ArrayList<>(); jars.addAll(Arrays.asList(JarHell.parseClassPath())); // read existing bundles. this does some checks on the installation too. - List bundles = PluginsService.getPluginBundles(pluginsDir); - - // if we aren't isolated, we need to jarhellcheck against any other non-isolated plugins - // that's always the first bundle - if (isolated == false) { - jars.addAll(bundles.get(0).urls); - } + PluginsService.getPluginBundles(pluginsDir); // add plugin jars to the list Path pluginJars[] = FileSystemUtils.files(candidate, "*.jar"); @@ -305,16 +379,19 @@ class InstallPluginCommand extends CliTool.Command { * Installs the plugin from {@code tmpRoot} into the plugins dir. * If the plugin has a bin dir and/or a config dir, those are copied. */ - private void install(Path tmpRoot, Environment env) throws Exception { + private void install(Terminal terminal, boolean isBatch, Path tmpRoot) throws Exception { List deleteOnFailure = new ArrayList<>(); deleteOnFailure.add(tmpRoot); try { - PluginInfo info = verify(tmpRoot, env); + PluginInfo info = verify(terminal, tmpRoot, isBatch); final Path destination = env.pluginsFile().resolve(info.getName()); if (Files.exists(destination)) { - throw new UserError(CliTool.ExitStatus.USAGE, "plugin directory " + destination.toAbsolutePath() + " already exists. To update the plugin, uninstall it first using 'remove " + info.getName() + "' command"); + throw new UserError( + ExitCodes.USAGE, + "plugin directory " + destination.toAbsolutePath() + + " already exists. To update the plugin, uninstall it first using 'remove " + info.getName() + "' command"); } Path tmpBinDir = tmpRoot.resolve("bin"); @@ -347,32 +424,34 @@ class InstallPluginCommand extends CliTool.Command { /** Copies the files from {@code tmpBinDir} into {@code destBinDir}, along with permissions from dest dirs parent. 
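jarHellCheck guards against the same class arriving twice on the classpath once the plugin's jars are added. A rough stand-in for that idea, assuming nothing beyond java.util.jar (the real check lives in Elasticsearch's JarHell class):

    import java.nio.file.Path;
    import java.util.Enumeration;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.jar.JarEntry;
    import java.util.jar.JarFile;

    // Rough stand-in for the jar hell idea: fail if any class file appears in
    // two different jars of the combined classpath-plus-plugin set.
    final class DuplicateClassCheckExample {
        static void check(List<Path> jars) throws Exception {
            Map<String, Path> seen = new HashMap<>();
            for (Path jar : jars) {
                try (JarFile jarFile = new JarFile(jar.toFile())) {
                    Enumeration<JarEntry> entries = jarFile.entries();
                    while (entries.hasMoreElements()) {
                        String name = entries.nextElement().getName();
                        if (name.endsWith(".class") == false) {
                            continue;
                        }
                        Path first = seen.putIfAbsent(name, jar);
                        if (first != null && first.equals(jar) == false) {
                            throw new IllegalStateException(name + " found in both " + first + " and " + jar);
                        }
                    }
                }
            }
        }
    }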
*/ private void installBin(PluginInfo info, Path tmpBinDir, Path destBinDir) throws Exception { if (Files.isDirectory(tmpBinDir) == false) { - throw new UserError(CliTool.ExitStatus.IO_ERROR, "bin in plugin " + info.getName() + " is not a directory"); + throw new UserError(ExitCodes.IO_ERROR, "bin in plugin " + info.getName() + " is not a directory"); } Files.createDirectory(destBinDir); // setup file attributes for the installed files to those of the parent dir - Set perms = new HashSet<>(); - PosixFileAttributeView binAttrs = Files.getFileAttributeView(destBinDir.getParent(), PosixFileAttributeView.class); - if (binAttrs != null) { - perms = new HashSet<>(binAttrs.readAttributes().permissions()); + final Set perms = new HashSet<>(); + final PosixFileAttributeView binAttributeView = Files.getFileAttributeView(destBinDir.getParent(), PosixFileAttributeView.class); + if (binAttributeView != null) { + perms.addAll(binAttributeView.readAttributes().permissions()); // setting execute bits, since this just means "the file is executable", and actual execution requires read perms.add(PosixFilePermission.OWNER_EXECUTE); perms.add(PosixFilePermission.GROUP_EXECUTE); perms.add(PosixFilePermission.OTHERS_EXECUTE); } - try (DirectoryStream stream = Files.newDirectoryStream(tmpBinDir)) { + try (DirectoryStream stream = Files.newDirectoryStream(tmpBinDir)) { for (Path srcFile : stream) { if (Files.isDirectory(srcFile)) { - throw new UserError(CliTool.ExitStatus.DATA_ERROR, "Directories not allowed in bin dir for plugin " + info.getName() + ", found " + srcFile.getFileName()); + throw new UserError( + ExitCodes.DATA_ERROR, + "Directories not allowed in bin dir for plugin " + info.getName() + ", found " + srcFile.getFileName()); } Path destFile = destBinDir.resolve(tmpBinDir.relativize(srcFile)); Files.copy(srcFile, destFile); - if (perms.isEmpty() == false) { - PosixFileAttributeView view = Files.getFileAttributeView(destFile, PosixFileAttributeView.class); + final PosixFileAttributeView view = Files.getFileAttributeView(destFile, PosixFileAttributeView.class); + if (view != null) { view.setPermissions(perms); } } @@ -386,24 +465,43 @@ class InstallPluginCommand extends CliTool.Command { */ private void installConfig(PluginInfo info, Path tmpConfigDir, Path destConfigDir) throws Exception { if (Files.isDirectory(tmpConfigDir) == false) { - throw new UserError(CliTool.ExitStatus.IO_ERROR, "config in plugin " + info.getName() + " is not a directory"); + throw new UserError(ExitCodes.IO_ERROR, "config in plugin " + info.getName() + " is not a directory"); } // create the plugin's config dir "if necessary" Files.createDirectories(destConfigDir); + final PosixFileAttributeView destConfigDirAttributesView = + Files.getFileAttributeView(destConfigDir.getParent(), PosixFileAttributeView.class); + final PosixFileAttributes destConfigDirAttributes = + destConfigDirAttributesView != null ? 
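installBin seeds each script's permissions from the parent bin directory and then adds the execute bits, degrading to a no-op where the filesystem is not POSIX. The same recipe in isolation, under that assumption:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.attribute.PosixFileAttributeView;
    import java.nio.file.attribute.PosixFilePermission;
    import java.util.HashSet;
    import java.util.Set;

    // Permission recipe for installed scripts: inherit the parent bin
    // directory's permissions, then add execute for owner, group and others.
    final class BinPermissionsExample {
        static Set<PosixFilePermission> scriptPermissions(Path binParent) throws IOException {
            Set<PosixFilePermission> perms = new HashSet<>();
            PosixFileAttributeView view = Files.getFileAttributeView(binParent, PosixFileAttributeView.class);
            if (view != null) { // view is null on non-POSIX filesystems, e.g. Windows
                perms.addAll(view.readAttributes().permissions());
                perms.add(PosixFilePermission.OWNER_EXECUTE);
                perms.add(PosixFilePermission.GROUP_EXECUTE);
                perms.add(PosixFilePermission.OTHERS_EXECUTE);
            }
            return perms;
        }
    }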
destConfigDirAttributesView.readAttributes() : null; + if (destConfigDirAttributes != null) { + setOwnerGroup(destConfigDir, destConfigDirAttributes); + } - try (DirectoryStream stream = Files.newDirectoryStream(tmpConfigDir)) { + try (DirectoryStream stream = Files.newDirectoryStream(tmpConfigDir)) { for (Path srcFile : stream) { if (Files.isDirectory(srcFile)) { - throw new UserError(CliTool.ExitStatus.DATA_ERROR, "Directories not allowed in config dir for plugin " + info.getName()); + throw new UserError(ExitCodes.DATA_ERROR, "Directories not allowed in config dir for plugin " + info.getName()); } Path destFile = destConfigDir.resolve(tmpConfigDir.relativize(srcFile)); if (Files.exists(destFile) == false) { Files.copy(srcFile, destFile); + if (destConfigDirAttributes != null) { + setOwnerGroup(destFile, destConfigDirAttributes); + } } } } IOUtils.rm(tmpConfigDir); // clean up what we just copied } + + private static void setOwnerGroup(final Path path, final PosixFileAttributes attributes) throws IOException { + Objects.requireNonNull(attributes); + PosixFileAttributeView fileAttributeView = Files.getFileAttributeView(path, PosixFileAttributeView.class); + assert fileAttributeView != null; + fileAttributeView.setOwner(attributes.owner()); + fileAttributeView.setGroup(attributes.group()); + } + } diff --git a/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java b/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java index 6abed4e6bc2..953e698a4c2 100644 --- a/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java +++ b/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java @@ -24,22 +24,25 @@ import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.settings.Settings; +import joptsimple.OptionSet; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.Terminal; import org.elasticsearch.env.Environment; /** * A command for the plugin cli to list plugins installed in elasticsearch. 
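setOwnerGroup propagates ownership from the existing config directory so that files installed by root remain owned like their siblings. A hedged sketch of the same propagation with plain NIO (the method name is mine):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.attribute.PosixFileAttributeView;
    import java.nio.file.attribute.PosixFileAttributes;

    // Copy owner and group from an existing directory onto a newly created
    // path; silently a no-op where POSIX attributes are unavailable.
    final class OwnerGroupExample {
        static void copyOwnerGroup(Path from, Path to) throws IOException {
            PosixFileAttributeView source = Files.getFileAttributeView(from, PosixFileAttributeView.class);
            PosixFileAttributeView dest = Files.getFileAttributeView(to, PosixFileAttributeView.class);
            if (source != null && dest != null) {
                PosixFileAttributes attributes = source.readAttributes();
                dest.setOwner(attributes.owner());
                dest.setGroup(attributes.group());
            }
        }
    }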
*/ -class ListPluginsCommand extends CliTool.Command { +class ListPluginsCommand extends Command { - ListPluginsCommand(Terminal terminal) { - super(terminal); + private final Environment env; + + ListPluginsCommand(Environment env) { + super("Lists installed elasticsearch plugins"); + this.env = env; } @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { + protected void execute(Terminal terminal, OptionSet options) throws Exception { if (Files.exists(env.pluginsFile()) == false) { throw new IOException("Plugins directory missing: " + env.pluginsFile()); } @@ -50,7 +53,5 @@ class ListPluginsCommand extends CliTool.Command { terminal.println(plugin.getFileName().toString()); } } - - return CliTool.ExitStatus.OK; } } diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginCli.java b/core/src/main/java/org/elasticsearch/plugins/PluginCli.java index df402e6359d..be06ea7db1c 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginCli.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginCli.java @@ -19,41 +19,24 @@ package org.elasticsearch.plugins; -import org.apache.commons.cli.CommandLine; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.CliToolConfig; -import org.elasticsearch.common.cli.Terminal; +import org.elasticsearch.cli.MultiCommand; +import org.elasticsearch.cli.Terminal; import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.node.internal.InternalSettingsPreparer; -import java.util.Locale; - -import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd; -import static org.elasticsearch.common.cli.CliToolConfig.Builder.option; - /** * A cli tool for adding, removing and listing plugins for elasticsearch. */ -public class PluginCli extends CliTool { +public class PluginCli extends MultiCommand { - // commands - private static final String LIST_CMD_NAME = "list"; - private static final String INSTALL_CMD_NAME = "install"; - private static final String REMOVE_CMD_NAME = "remove"; - - // usage config - private static final CliToolConfig.Cmd LIST_CMD = cmd(LIST_CMD_NAME, ListPluginsCommand.class).build(); - private static final CliToolConfig.Cmd INSTALL_CMD = cmd(INSTALL_CMD_NAME, InstallPluginCommand.class) - .options(option("b", "batch").required(false)) - .build(); - private static final CliToolConfig.Cmd REMOVE_CMD = cmd(REMOVE_CMD_NAME, RemovePluginCommand.class).build(); - - static final CliToolConfig CONFIG = CliToolConfig.config("plugin", PluginCli.class) - .cmds(LIST_CMD, INSTALL_CMD, REMOVE_CMD) - .build(); + public PluginCli(Environment env) { + super("A tool for managing installed elasticsearch plugins"); + subcommands.put("list", new ListPluginsCommand(env)); + subcommands.put("install", new InstallPluginCommand(env)); + subcommands.put("remove", new RemovePluginCommand(env)); + } public static void main(String[] args) throws Exception { // initialize default for es.logger.level because we will not read the logging.yml @@ -64,61 +47,13 @@ public class PluginCli extends CliTool { // executed there is no way of knowing where the logfiles should be placed. For example, if elasticsearch // is run as service then the logs should be at /var/log/elasticsearch but when started from the tar they should be at es.home/logs. // Therefore we print to Terminal. 
- Environment env = InternalSettingsPreparer.prepareEnvironment(Settings.builder() + Environment loggingEnvironment = InternalSettingsPreparer.prepareEnvironment(Settings.builder() .put("appender.terminal.type", "terminal") .put("rootLogger", "${es.logger.level}, terminal") .put("es.logger.level", loggerLevel) .build(), Terminal.DEFAULT); - // configure but do not read the logging conf file - LogConfigurator.configure(env.settings(), false); - int status = new PluginCli(Terminal.DEFAULT).execute(args).status(); - exit(status); - } - - @SuppressForbidden(reason = "Allowed to exit explicitly from #main()") - private static void exit(int status) { - System.exit(status); - } - - PluginCli(Terminal terminal) { - super(CONFIG, terminal); - } - - @Override - protected Command parse(String cmdName, CommandLine cli) throws Exception { - switch (cmdName.toLowerCase(Locale.ROOT)) { - case LIST_CMD_NAME: - return new ListPluginsCommand(terminal); - case INSTALL_CMD_NAME: - return parseInstallPluginCommand(cli); - case REMOVE_CMD_NAME: - return parseRemovePluginCommand(cli); - default: - assert false : "can't get here as cmd name is validated before this method is called"; - return exitCmd(ExitStatus.USAGE); - } - } - - private Command parseInstallPluginCommand(CommandLine cli) { - String[] args = cli.getArgs(); - if (args.length != 1) { - return exitCmd(ExitStatus.USAGE, terminal, "Must supply a single plugin id argument"); - } - - boolean batch = System.console() == null; - if (cli.hasOption("b")) { - batch = true; - } - - return new InstallPluginCommand(terminal, args[0], batch); - } - - private Command parseRemovePluginCommand(CommandLine cli) { - String[] args = cli.getArgs(); - if (args.length != 1) { - return exitCmd(ExitStatus.USAGE, terminal, "Must supply a single plugin name argument"); - } - - return new RemovePluginCommand(terminal, args[0]); + LogConfigurator.configure(loggingEnvironment.settings(), false); + Environment env = InternalSettingsPreparer.prepareEnvironment(Settings.EMPTY, Terminal.DEFAULT); + exit(new PluginCli(env).main(args, Terminal.DEFAULT)); } } diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java b/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java index 73464d054dd..bd063b3312c 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java @@ -44,14 +44,12 @@ public class PluginInfo implements Streamable, ToXContent { static final XContentBuilderString URL = new XContentBuilderString("url"); static final XContentBuilderString VERSION = new XContentBuilderString("version"); static final XContentBuilderString CLASSNAME = new XContentBuilderString("classname"); - static final XContentBuilderString ISOLATED = new XContentBuilderString("isolated"); } private String name; private String description; private String version; private String classname; - private boolean isolated; public PluginInfo() { } @@ -63,12 +61,11 @@ public class PluginInfo implements Streamable, ToXContent { * @param description Its description * @param version Version number */ - PluginInfo(String name, String description, String version, String classname, boolean isolated) { + PluginInfo(String name, String description, String version, String classname) { this.name = name; this.description = description; this.version = version; this.classname = classname; - this.isolated = isolated; } /** reads (and validates) plugin metadata descriptor file */ @@ -106,13 +103,12 @@ public class PluginInfo 
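PluginCli now inherits its dispatch from MultiCommand instead of hand-rolling a parse() switch. A toy model of that dispatch style, with illustrative names rather than the Elasticsearch classes:

    import java.util.Arrays;
    import java.util.LinkedHashMap;
    import java.util.Map;

    // Toy model of the MultiCommand style: the first argument selects a
    // registered subcommand, the rest is handed through unchanged.
    final class MiniMultiCommand {
        interface Subcommand {
            int run(String[] args) throws Exception;
        }

        final Map<String, Subcommand> subcommands = new LinkedHashMap<>();

        int main(String[] args) throws Exception {
            if (args.length == 0 || subcommands.containsKey(args[0]) == false) {
                System.err.println("usage: plugin <" + String.join("|", subcommands.keySet()) + "> ...");
                return 64; // conventional EX_USAGE exit code
            }
            return subcommands.get(args[0]).run(Arrays.copyOfRange(args, 1, args.length));
        }
    }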
implements Streamable, ToXContent { } JarHell.checkVersionFormat(javaVersionString); JarHell.checkJavaVersion(name, javaVersionString); - boolean isolated = Boolean.parseBoolean(props.getProperty("isolated", "true")); String classname = props.getProperty("classname"); if (classname == null) { throw new IllegalArgumentException("Property [classname] is missing for plugin [" + name + "]"); } - return new PluginInfo(name, description, version, classname, isolated); + return new PluginInfo(name, description, version, classname); } /** @@ -129,13 +125,6 @@ public class PluginInfo implements Streamable, ToXContent { return description; } - /** - * @return true if plugin has isolated classloader - */ - public boolean isIsolated() { - return isolated; - } - /** * @return plugin's classname */ @@ -162,7 +151,6 @@ public class PluginInfo implements Streamable, ToXContent { this.description = in.readString(); this.version = in.readString(); this.classname = in.readString(); - this.isolated = in.readBoolean(); } @Override @@ -171,7 +159,6 @@ public class PluginInfo implements Streamable, ToXContent { out.writeString(description); out.writeString(version); out.writeString(classname); - out.writeBoolean(isolated); } @Override @@ -181,7 +168,6 @@ public class PluginInfo implements Streamable, ToXContent { builder.field(Fields.VERSION, version); builder.field(Fields.DESCRIPTION, description); builder.field(Fields.CLASSNAME, classname); - builder.field(Fields.ISOLATED, isolated); builder.endObject(); return builder; @@ -212,8 +198,7 @@ public class PluginInfo implements Streamable, ToXContent { .append("Name: ").append(name).append("\n") .append("Description: ").append(description).append("\n") .append("Version: ").append(version).append("\n") - .append(" * Classname: ").append(classname).append("\n") - .append(" * Isolated: ").append(isolated); + .append(" * Classname: ").append(classname); return information.toString(); } diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginSecurity.java b/core/src/main/java/org/elasticsearch/plugins/PluginSecurity.java index b14bcaf2ff3..f9c3d1826c9 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginSecurity.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginSecurity.java @@ -20,8 +20,8 @@ package org.elasticsearch.plugins; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.cli.Terminal.Verbosity; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.Terminal.Verbosity; import org.elasticsearch.env.Environment; import java.io.IOException; diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java index 3e36c5d8f09..c1f3043cd32 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexModule; @@ -71,7 +72,8 @@ public class PluginsService extends AbstractComponent { */ private final List> plugins; private final PluginsAndModules info; - public static final Setting> MANDATORY_SETTING = 
Setting.listSetting("plugin.mandatory", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER); + public static final Setting> MANDATORY_SETTING = + Setting.listSetting("plugin.mandatory", Collections.emptyList(), Function.identity(), Property.NodeScope); private final Map> onModuleReferences; @@ -101,7 +103,7 @@ public class PluginsService extends AbstractComponent { // first we load plugins that are on the classpath. this is for tests and transport clients for (Class pluginClass : classpathPlugins) { Plugin plugin = loadPlugin(pluginClass, settings); - PluginInfo pluginInfo = new PluginInfo(plugin.name(), plugin.description(), "NA", pluginClass.getName(), false); + PluginInfo pluginInfo = new PluginInfo(plugin.name(), plugin.description(), "NA", pluginClass.getName()); if (logger.isTraceEnabled()) { logger.trace("plugin loaded from classpath [{}]", pluginInfo); } @@ -300,9 +302,6 @@ public class PluginsService extends AbstractComponent { continue; // skip over .DS_Store etc } PluginInfo info = PluginInfo.readFromProperties(module); - if (!info.isIsolated()) { - throw new IllegalStateException("modules must be isolated: " + info); - } Bundle bundle = new Bundle(); bundle.plugins.add(info); // gather urls for jar files @@ -327,8 +326,6 @@ public class PluginsService extends AbstractComponent { } List bundles = new ArrayList<>(); - // a special purgatory for plugins that directly depend on each other - bundles.add(new Bundle()); try (DirectoryStream stream = Files.newDirectoryStream(pluginsDirectory)) { for (Path plugin : stream) { @@ -352,13 +349,8 @@ public class PluginsService extends AbstractComponent { urls.add(jar.toRealPath().toUri().toURL()); } } - final Bundle bundle; - if (info.isIsolated() == false) { - bundle = bundles.get(0); // purgatory - } else { - bundle = new Bundle(); - bundles.add(bundle); - } + final Bundle bundle = new Bundle(); + bundles.add(bundle); bundle.plugins.add(info); bundle.urls.addAll(urls); } diff --git a/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java b/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java index 8ce1056bbfd..a3e6c375f83 100644 --- a/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java +++ b/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java @@ -19,40 +19,55 @@ package org.elasticsearch.plugins; -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.cli.CliTool; -import org.elasticsearch.common.cli.Terminal; -import org.elasticsearch.common.cli.UserError; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; - import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; import java.util.ArrayList; import java.util.List; -import static org.elasticsearch.common.cli.Terminal.Verbosity.VERBOSE; +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.UserError; +import org.elasticsearch.common.Strings; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.env.Environment; + +import static org.elasticsearch.cli.Terminal.Verbosity.VERBOSE; /** * A command for the plugin cli to remove a plugin from elasticsearch. 
*/ -class RemovePluginCommand extends CliTool.Command { - private final String pluginName; +class RemovePluginCommand extends Command { - public RemovePluginCommand(Terminal terminal, String pluginName) { - super(terminal); - this.pluginName = pluginName; + private final Environment env; + private final OptionSpec arguments; + + RemovePluginCommand(Environment env) { + super("Removes a plugin from elasticsearch"); + this.env = env; + this.arguments = parser.nonOptions("plugin name"); } @Override - public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception { + protected void execute(Terminal terminal, OptionSet options) throws Exception { + // TODO: in jopt-simple 5.0 we can enforce a min/max number of positional args + List args = arguments.values(options); + if (args.size() != 1) { + throw new UserError(ExitCodes.USAGE, "Must supply a single plugin id argument"); + } + execute(terminal, args.get(0)); + } + + // pkg private for testing + void execute(Terminal terminal, String pluginName) throws Exception { terminal.println("-> Removing " + Strings.coalesceToEmpty(pluginName) + "..."); Path pluginDir = env.pluginsFile().resolve(pluginName); if (Files.exists(pluginDir) == false) { - throw new UserError(CliTool.ExitStatus.USAGE, "Plugin " + pluginName + " not found. Run 'plugin list' to get list of installed plugins."); + throw new UserError(ExitCodes.USAGE, "Plugin " + pluginName + " not found. Run 'plugin list' to get list of installed plugins."); } List pluginPaths = new ArrayList<>(); @@ -60,7 +75,7 @@ class RemovePluginCommand extends CliTool.Command { Path pluginBinDir = env.binFile().resolve(pluginName); if (Files.exists(pluginBinDir)) { if (Files.isDirectory(pluginBinDir) == false) { - throw new UserError(CliTool.ExitStatus.IO_ERROR, "Bin dir for " + pluginName + " is not a directory"); + throw new UserError(ExitCodes.IO_ERROR, "Bin dir for " + pluginName + " is not a directory"); } pluginPaths.add(pluginBinDir); terminal.println(VERBOSE, "Removing: " + pluginBinDir); @@ -72,7 +87,5 @@ class RemovePluginCommand extends CliTool.Command { pluginPaths.add(tmpPluginDir); IOUtils.rm(pluginPaths.toArray(new Path[pluginPaths.size()])); - - return CliTool.ExitStatus.OK; } } diff --git a/core/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/core/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index 6eb32cfb06f..da2d9688095 100644 --- a/core/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/core/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -22,7 +22,6 @@ package org.elasticsearch.repositories; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest; @@ -31,6 +30,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoriesMetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Injector; diff --git 
a/core/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java b/core/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java index 91600488332..48ffbd5c1cb 100644 --- a/core/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java +++ b/core/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java @@ -22,9 +22,9 @@ package org.elasticsearch.repositories; import com.carrotsearch.hppc.ObjectContainer; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index a6ea381adb4..5d423552a56 100644 --- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -478,7 +478,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent LOCATION_SETTING = new Setting<>("location", "", Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting REPOSITORIES_LOCATION_SETTING = new Setting<>("repositories.fs.location", LOCATION_SETTING, Function.identity(), false, Setting.Scope.CLUSTER); - public static final Setting CHUNK_SIZE_SETTING = Setting.byteSizeSetting("chunk_size", "-1", false, Setting.Scope.CLUSTER); - public static final Setting REPOSITORIES_CHUNK_SIZE_SETTING = Setting.byteSizeSetting("repositories.fs.chunk_size", "-1", false, Setting.Scope.CLUSTER); - public static final Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, false, Setting.Scope.CLUSTER); - public static final Setting REPOSITORIES_COMPRESS_SETTING = Setting.boolSetting("repositories.fs.compress", false, false, Setting.Scope.CLUSTER); + public static final Setting LOCATION_SETTING = + new Setting<>("location", "", Function.identity(), Property.NodeScope); + public static final Setting REPOSITORIES_LOCATION_SETTING = + new Setting<>("repositories.fs.location", LOCATION_SETTING, Function.identity(), Property.NodeScope); + public static final Setting CHUNK_SIZE_SETTING = + Setting.byteSizeSetting("chunk_size", "-1", Property.NodeScope); + public static final Setting REPOSITORIES_CHUNK_SIZE_SETTING = + Setting.byteSizeSetting("repositories.fs.chunk_size", "-1", Property.NodeScope); + public static final Setting COMPRESS_SETTING = Setting.boolSetting("compress", false, Property.NodeScope); + public static final Setting REPOSITORIES_COMPRESS_SETTING = + Setting.boolSetting("repositories.fs.compress", false, Property.NodeScope); private final FsBlobStore blobStore; diff --git a/core/src/main/java/org/elasticsearch/repositories/uri/URLIndexShardRepository.java b/core/src/main/java/org/elasticsearch/repositories/uri/URLIndexShardRepository.java index ab9ec72463a..616a36d5066 100644 --- a/core/src/main/java/org/elasticsearch/repositories/uri/URLIndexShardRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/uri/URLIndexShardRepository.java @@ -19,7 +19,7 @@ 
package org.elasticsearch.repositories.uri; -import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository; diff --git a/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java b/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java index 2d15db245aa..77d4f1cc816 100644 --- a/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.url.URLBlobStore; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.util.URIPattern; import org.elasticsearch.env.Environment; import org.elasticsearch.index.snapshots.IndexShardRepository; @@ -55,19 +56,22 @@ public class URLRepository extends BlobStoreRepository { public final static String TYPE = "url"; - public static final Setting> SUPPORTED_PROTOCOLS_SETTING = Setting.listSetting("repositories.url.supported_protocols", - Arrays.asList("http", "https", "ftp", "file", "jar"), Function.identity(), false, Setting.Scope.CLUSTER); + public static final Setting> SUPPORTED_PROTOCOLS_SETTING = + Setting.listSetting("repositories.url.supported_protocols", Arrays.asList("http", "https", "ftp", "file", "jar"), + Function.identity(), Property.NodeScope); - public static final Setting> ALLOWED_URLS_SETTING = Setting.listSetting("repositories.url.allowed_urls", - Collections.emptyList(), URIPattern::new, false, Setting.Scope.CLUSTER); + public static final Setting> ALLOWED_URLS_SETTING = + Setting.listSetting("repositories.url.allowed_urls", Collections.emptyList(), URIPattern::new, Property.NodeScope); - public static final Setting URL_SETTING = new Setting<>("url", "http:", URLRepository::parseURL, false, Setting.Scope.CLUSTER); - public static final Setting REPOSITORIES_URL_SETTING = new Setting<>("repositories.url.url", (s) -> s.get("repositories.uri.url", "http:"), - URLRepository::parseURL, false, Setting.Scope.CLUSTER); + public static final Setting URL_SETTING = new Setting<>("url", "http:", URLRepository::parseURL, Property.NodeScope); + public static final Setting REPOSITORIES_URL_SETTING = + new Setting<>("repositories.url.url", (s) -> s.get("repositories.uri.url", "http:"), URLRepository::parseURL, + Property.NodeScope); - public static final Setting LIST_DIRECTORIES_SETTING = Setting.boolSetting("list_directories", true, false, Setting.Scope.CLUSTER); - public static final Setting REPOSITORIES_LIST_DIRECTORIES_SETTING = Setting.boolSetting("repositories.uri.list_directories", true, - false, Setting.Scope.CLUSTER); + public static final Setting LIST_DIRECTORIES_SETTING = + Setting.boolSetting("list_directories", true, Property.NodeScope); + public static final Setting REPOSITORIES_LIST_DIRECTORIES_SETTING = + Setting.boolSetting("repositories.uri.list_directories", true, Property.NodeScope); private final List supportedProtocols; diff --git a/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index 60b3ccce930..b406dfca545 100644 --- 
a/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/core/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -23,6 +23,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; /** @@ -34,7 +35,8 @@ import org.elasticsearch.common.settings.Settings; * {@link org.elasticsearch.rest.RestController#registerRelevantHeaders(String...)} */ public abstract class BaseRestHandler extends AbstractComponent implements RestHandler { - public static final Setting MULTI_ALLOW_EXPLICIT_INDEX = Setting.boolSetting("rest.action.multi.allow_explicit_index", true, false, Setting.Scope.CLUSTER); + public static final Setting MULTI_ALLOW_EXPLICIT_INDEX = + Setting.boolSetting("rest.action.multi.allow_explicit_index", true, Property.NodeScope); private final Client client; protected final ParseFieldMatcher parseFieldMatcher; diff --git a/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java b/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java index ac8eadade0b..52f624849fc 100644 --- a/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java +++ b/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java @@ -126,7 +126,11 @@ public class BytesRestResponse extends RestResponse { if (channel.request().paramAsBoolean("error_trace", !ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT)) { params = new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "false"), channel.request()); } else { - SUPPRESSED_ERROR_LOGGER.info("{} Params: {}", t, channel.request().path(), channel.request().params()); + if (status.getStatus() < 500) { + SUPPRESSED_ERROR_LOGGER.debug("{} Params: {}", t, channel.request().path(), channel.request().params()); + } else { + SUPPRESSED_ERROR_LOGGER.warn("{} Params: {}", t, channel.request().path(), channel.request().params()); + } params = channel.request(); } builder.field("error"); diff --git a/core/src/main/java/org/elasticsearch/rest/RestController.java b/core/src/main/java/org/elasticsearch/rest/RestController.java index 64e21002d8c..0cbfdd0ef1b 100644 --- a/core/src/main/java/org/elasticsearch/rest/RestController.java +++ b/core/src/main/java/org/elasticsearch/rest/RestController.java @@ -176,7 +176,7 @@ public class RestController extends AbstractLifecycleComponent { try { channel.sendResponse(new BytesRestResponse(channel, e)); } catch (Throwable e1) { - logger.error("failed to send failure response for uri [" + request.uri() + "]", e1); + logger.error("failed to send failure response for uri [{}]", e1, request.uri()); } } } else { @@ -275,7 +275,7 @@ public class RestController extends AbstractLifecycleComponent { try { channel.sendResponse(new BytesRestResponse(channel, e)); } catch (IOException e1) { - logger.error("Failed to send failure response for uri [" + request.uri() + "]", e1); + logger.error("Failed to send failure response for uri [{}]", e1, request.uri()); } } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java index fb8e9c63740..1e2aece1646 100644 --- 
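The BytesRestResponse change above splits the suppressed-error log by status class: 4xx responses are client mistakes and log at debug, 5xx are server failures and log at warn. A standalone sketch of that policy using java.util.logging (the patch itself uses Elasticsearch's ESLogger):

    import java.util.logging.Level;
    import java.util.logging.Logger;

    // The logging policy in isolation: 4xx means the client sent something
    // bad (debug/fine), 5xx means the server failed (warn).
    final class SuppressedErrorLogExample {
        private static final Logger LOGGER = Logger.getLogger("rest.suppressed");

        static void logSuppressed(int status, String path) {
            Level level = status < 500 ? Level.FINE : Level.WARNING;
            LOGGER.log(level, "path: {0}, status: {1}", new Object[] { path, status });
        }
    }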
a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java @@ -81,6 +81,7 @@ public class RestNodesStatsAction extends BaseRestHandler { nodesStatsRequest.breaker(metrics.contains("breaker")); nodesStatsRequest.script(metrics.contains("script")); nodesStatsRequest.discovery(metrics.contains("discovery")); + nodesStatsRequest.ingest(metrics.contains("ingest")); // check for index specific metrics if (metrics.contains("indices")) { @@ -113,6 +114,6 @@ public class RestNodesStatsAction extends BaseRestHandler { nodesStatsRequest.indices().includeSegmentFileSizes(true); } - client.admin().cluster().nodesStats(nodesStatsRequest, new RestToXContentListener(channel)); + client.admin().cluster().nodesStats(nodesStatsRequest, new RestToXContentListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestCancelTasksAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestCancelTasksAction.java index 99cdc16253a..658090bb6db 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestCancelTasksAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestCancelTasksAction.java @@ -52,10 +52,10 @@ public class RestCancelTasksAction extends BaseRestHandler { TaskId parentTaskId = new TaskId(request.param("parent_task_id")); CancelTasksRequest cancelTasksRequest = new CancelTasksRequest(); - cancelTasksRequest.taskId(taskId); - cancelTasksRequest.nodesIds(nodesIds); - cancelTasksRequest.actions(actions); - cancelTasksRequest.parentTaskId(parentTaskId); + cancelTasksRequest.setTaskId(taskId); + cancelTasksRequest.setNodesIds(nodesIds); + cancelTasksRequest.setActions(actions); + cancelTasksRequest.setParentTaskId(parentTaskId); client.admin().cluster().cancelTasks(cancelTasksRequest, new RestToXContentListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestListTasksAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestListTasksAction.java index 992267fa8a5..168d091968e 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestListTasksAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/tasks/RestListTasksAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; @@ -50,13 +51,17 @@ public class RestListTasksAction extends BaseRestHandler { TaskId taskId = new TaskId(request.param("taskId")); String[] actions = Strings.splitStringByCommaToArray(request.param("actions")); TaskId parentTaskId = new TaskId(request.param("parent_task_id")); + boolean waitForCompletion = request.paramAsBoolean("wait_for_completion", false); + TimeValue timeout = request.paramAsTime("timeout", null); ListTasksRequest listTasksRequest = new ListTasksRequest(); - listTasksRequest.taskId(taskId); - listTasksRequest.nodesIds(nodesIds); - listTasksRequest.detailed(detailed); - listTasksRequest.actions(actions); - 
listTasksRequest.parentTaskId(parentTaskId); + listTasksRequest.setTaskId(taskId); + listTasksRequest.setNodesIds(nodesIds); + listTasksRequest.setDetailed(detailed); + listTasksRequest.setActions(actions); + listTasksRequest.setParentTaskId(parentTaskId); + listTasksRequest.setWaitForCompletion(waitForCompletion); + listTasksRequest.setTimeout(timeout); client.admin().cluster().listTasks(listTasksRequest, new RestToXContentListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java index bcf43a4baa6..4477ff25011 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java @@ -47,6 +47,7 @@ public class RestUpdateSettingsAction extends BaseRestHandler { "timeout", "master_timeout", "index", + "preserve_existing", "expand_wildcards", "ignore_unavailable", "allow_no_indices")); @@ -62,6 +63,7 @@ public class RestUpdateSettingsAction extends BaseRestHandler { public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { UpdateSettingsRequest updateSettingsRequest = updateSettingsRequest(Strings.splitStringByCommaToArray(request.param("index"))); updateSettingsRequest.timeout(request.paramAsTime("timeout", updateSettingsRequest.timeout())); + updateSettingsRequest.setPreserveExisting(request.paramAsBoolean("preserve_existing", updateSettingsRequest.isPreserveExisting())); updateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", updateSettingsRequest.masterNodeTimeout())); updateSettingsRequest.indicesOptions(IndicesOptions.fromRequest(request, updateSettingsRequest.indicesOptions())); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java index fa4371846f6..dbda83709ba 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java @@ -71,18 +71,17 @@ public class RestIndicesStatsAction extends BaseRestHandler { indicesStatsRequest.docs(metrics.contains("docs")); indicesStatsRequest.store(metrics.contains("store")); indicesStatsRequest.indexing(metrics.contains("indexing")); - indicesStatsRequest.search(metrics.contains("search")); + indicesStatsRequest.search(metrics.contains("search") || metrics.contains("suggest")); indicesStatsRequest.get(metrics.contains("get")); indicesStatsRequest.merge(metrics.contains("merge")); indicesStatsRequest.refresh(metrics.contains("refresh")); indicesStatsRequest.flush(metrics.contains("flush")); indicesStatsRequest.warmer(metrics.contains("warmer")); indicesStatsRequest.queryCache(metrics.contains("query_cache")); - indicesStatsRequest.percolate(metrics.contains("percolate")); + indicesStatsRequest.percolate(metrics.contains("percolator_cache")); indicesStatsRequest.segments(metrics.contains("segments")); indicesStatsRequest.fieldData(metrics.contains("fielddata")); indicesStatsRequest.completion(metrics.contains("completion")); - indicesStatsRequest.suggest(metrics.contains("suggest")); indicesStatsRequest.requestCache(metrics.contains("request_cache")); 
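These stats handlers share one idiom: split the requested metrics once, then drive each stats section off a set-membership test. That is also where the standalone suggest metric gets folded into search above. A runnable distillation (metric names are from the patch, everything else is illustrative):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    // Distillation of the metric-flag idiom: split ?metric= once, then toggle
    // each stats section via set membership; "suggest" now simply turns the
    // search section on.
    final class MetricFlagsExample {
        public static void main(String[] args) {
            Set<String> metrics = new HashSet<>(Arrays.asList("docs,store,suggest".split(",")));
            boolean docs = metrics.contains("docs");
            boolean search = metrics.contains("search") || metrics.contains("suggest");
            System.out.println("docs=" + docs + ", search=" + search); // docs=true, search=true
        }
    }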
indicesStatsRequest.recovery(metrics.contains("recovery")); indicesStatsRequest.translog(metrics.contains("translog")); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java index aec087523b8..958fa40b54b 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java @@ -83,8 +83,8 @@ public class RestIndicesAction extends AbstractCatAction { public void processResponse(final ClusterStateResponse clusterStateResponse) { ClusterState state = clusterStateResponse.getState(); final IndicesOptions concreteIndicesOptions = IndicesOptions.fromOptions(false, true, true, true); - final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, concreteIndicesOptions, indices); - final String[] openIndices = indexNameExpressionResolver.concreteIndices(state, IndicesOptions.lenientExpandOpen(), indices); + final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, concreteIndicesOptions, indices); + final String[] openIndices = indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), indices); ClusterHealthRequest clusterHealthRequest = Requests.clusterHealthRequest(openIndices); clusterHealthRequest.local(request.paramAsBoolean("local", clusterHealthRequest.local())); client.admin().cluster().health(clusterHealthRequest, new RestActionListener(channel) { @@ -135,22 +135,22 @@ public class RestIndicesAction extends AbstractCatAction { table.addCell("fielddata.evictions", "sibling:pri;alias:fe,fielddataEvictions;default:false;text-align:right;desc:fielddata evictions"); table.addCell("pri.fielddata.evictions", "default:false;text-align:right;desc:fielddata evictions"); - table.addCell("query_cache.memory_size", "sibling:pri;alias:fcm,queryCacheMemory;default:false;text-align:right;desc:used query cache"); + table.addCell("query_cache.memory_size", "sibling:pri;alias:qcm,queryCacheMemory;default:false;text-align:right;desc:used query cache"); table.addCell("pri.query_cache.memory_size", "default:false;text-align:right;desc:used query cache"); - table.addCell("query_cache.evictions", "sibling:pri;alias:fce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions"); + table.addCell("query_cache.evictions", "sibling:pri;alias:qce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions"); table.addCell("pri.query_cache.evictions", "default:false;text-align:right;desc:query cache evictions"); - table.addCell("request_cache.memory_size", "sibling:pri;alias:qcm,queryCacheMemory;default:false;text-align:right;desc:used request cache"); + table.addCell("request_cache.memory_size", "sibling:pri;alias:rcm,requestCacheMemory;default:false;text-align:right;desc:used request cache"); table.addCell("pri.request_cache.memory_size", "default:false;text-align:right;desc:used request cache"); - table.addCell("request_cache.evictions", "sibling:pri;alias:qce,queryCacheEvictions;default:false;text-align:right;desc:request cache evictions"); + table.addCell("request_cache.evictions", "sibling:pri;alias:rce,requestCacheEvictions;default:false;text-align:right;desc:request cache evictions"); table.addCell("pri.request_cache.evictions", "default:false;text-align:right;desc:request cache evictions"); - table.addCell("request_cache.hit_count", 
"sibling:pri;alias:qchc,queryCacheHitCount;default:false;text-align:right;desc:request cache hit count"); + table.addCell("request_cache.hit_count", "sibling:pri;alias:rchc,requestCacheHitCount;default:false;text-align:right;desc:request cache hit count"); table.addCell("pri.request_cache.hit_count", "default:false;text-align:right;desc:request cache hit count"); - table.addCell("request_cache.miss_count", "sibling:pri;alias:qcmc,queryCacheMissCount;default:false;text-align:right;desc:request cache miss count"); + table.addCell("request_cache.miss_count", "sibling:pri;alias:rcmc,requestCacheMissCount;default:false;text-align:right;desc:request cache miss count"); table.addCell("pri.request_cache.miss_count", "default:false;text-align:right;desc:request cache miss count"); table.addCell("flush.total", "sibling:pri;alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes"); @@ -222,21 +222,9 @@ public class RestIndicesAction extends AbstractCatAction { table.addCell("merges.total_time", "sibling:pri;alias:mtt,mergesTotalTime;default:false;text-align:right;desc:time spent in merges"); table.addCell("pri.merges.total_time", "default:false;text-align:right;desc:time spent in merges"); - table.addCell("percolate.current", "sibling:pri;alias:pc,percolateCurrent;default:false;text-align:right;desc:number of current percolations"); - table.addCell("pri.percolate.current", "default:false;text-align:right;desc:number of current percolations"); - - table.addCell("percolate.memory_size", "sibling:pri;alias:pm,percolateMemory;default:false;text-align:right;desc:memory used by percolations"); - table.addCell("pri.percolate.memory_size", "default:false;text-align:right;desc:memory used by percolations"); - table.addCell("percolate.queries", "sibling:pri;alias:pq,percolateQueries;default:false;text-align:right;desc:number of registered percolation queries"); table.addCell("pri.percolate.queries", "default:false;text-align:right;desc:number of registered percolation queries"); - table.addCell("percolate.time", "sibling:pri;alias:pti,percolateTime;default:false;text-align:right;desc:time spent percolating"); - table.addCell("pri.percolate.time", "default:false;text-align:right;desc:time spent percolating"); - - table.addCell("percolate.total", "sibling:pri;alias:pto,percolateTotal;default:false;text-align:right;desc:total percolations"); - table.addCell("pri.percolate.total", "default:false;text-align:right;desc:total percolations"); - table.addCell("refresh.total", "sibling:pri;alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes"); table.addCell("pri.refresh.total", "default:false;text-align:right;desc:total refreshes"); @@ -436,20 +424,8 @@ public class RestIndicesAction extends AbstractCatAction { table.addCell(indexStats == null ? null : indexStats.getTotal().getMerge().getTotalTime()); table.addCell(indexStats == null ? null : indexStats.getPrimaries().getMerge().getTotalTime()); - table.addCell(indexStats == null ? null : indexStats.getTotal().getPercolate().getCurrent()); - table.addCell(indexStats == null ? null : indexStats.getPrimaries().getPercolate().getCurrent()); - - table.addCell(indexStats == null ? null : indexStats.getTotal().getPercolate().getMemorySize()); - table.addCell(indexStats == null ? null : indexStats.getPrimaries().getPercolate().getMemorySize()); - - table.addCell(indexStats == null ? null : indexStats.getTotal().getPercolate().getNumQueries()); - table.addCell(indexStats == null ? 
null : indexStats.getPrimaries().getPercolate().getNumQueries()); - - table.addCell(indexStats == null ? null : indexStats.getTotal().getPercolate().getTime()); - table.addCell(indexStats == null ? null : indexStats.getPrimaries().getPercolate().getTime()); - - table.addCell(indexStats == null ? null : indexStats.getTotal().getPercolate().getCount()); - table.addCell(indexStats == null ? null : indexStats.getPrimaries().getPercolate().getCount()); + table.addCell(indexStats == null ? null : indexStats.getTotal().getPercolatorCache().getNumQueries()); + table.addCell(indexStats == null ? null : indexStats.getPrimaries().getPercolatorCache().getNumQueries()); table.addCell(indexStats == null ? null : indexStats.getTotal().getRefresh().getTotal()); table.addCell(indexStats == null ? null : indexStats.getPrimaries().getRefresh().getTotal()); @@ -514,14 +490,14 @@ public class RestIndicesAction extends AbstractCatAction { table.addCell(indexStats == null ? null : indexStats.getTotal().getWarmer().totalTime()); table.addCell(indexStats == null ? null : indexStats.getPrimaries().getWarmer().totalTime()); - table.addCell(indexStats == null ? null : indexStats.getTotal().getSuggest().getCurrent()); - table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSuggest().getCurrent()); + table.addCell(indexStats == null ? null : indexStats.getTotal().getSearch().getTotal().getSuggestCurrent()); + table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSearch().getTotal().getSuggestCurrent()); - table.addCell(indexStats == null ? null : indexStats.getTotal().getSuggest().getTime()); - table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSuggest().getTime()); + table.addCell(indexStats == null ? null : indexStats.getTotal().getSearch().getTotal().getSuggestTime()); + table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSearch().getTotal().getSuggestTime()); - table.addCell(indexStats == null ? null : indexStats.getTotal().getSuggest().getCount()); - table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSuggest().getCount()); + table.addCell(indexStats == null ? null : indexStats.getTotal().getSearch().getTotal().getSuggestCount()); + table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSearch().getTotal().getSuggestCount()); table.addCell(indexStats == null ? null : indexStats.getTotal().getTotalMemory()); table.addCell(indexStats == null ? 
null : indexStats.getPrimaries().getTotalMemory()); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java index 0605bc4dcab..54ee855b543 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -46,10 +46,9 @@ import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.get.GetStats; import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.merge.MergeStats; -import org.elasticsearch.index.percolator.PercolateStats; +import org.elasticsearch.index.percolator.PercolatorQueryCacheStats; import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.search.stats.SearchStats; -import org.elasticsearch.index.suggest.stats.SuggestStats; import org.elasticsearch.indices.NodeIndicesStats; import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.monitor.jvm.JvmInfo; @@ -67,7 +66,6 @@ import org.elasticsearch.script.ScriptStats; import org.elasticsearch.search.suggest.completion.CompletionStats; import java.util.Locale; -import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.GET; @@ -151,13 +149,13 @@ public class RestNodesAction extends AbstractCatAction { table.addCell("fielddata.memory_size", "alias:fm,fielddataMemory;default:false;text-align:right;desc:used fielddata cache"); table.addCell("fielddata.evictions", "alias:fe,fielddataEvictions;default:false;text-align:right;desc:fielddata evictions"); - table.addCell("query_cache.memory_size", "alias:fcm,queryCacheMemory;default:false;text-align:right;desc:used query cache"); - table.addCell("query_cache.evictions", "alias:fce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions"); + table.addCell("query_cache.memory_size", "alias:qcm,queryCacheMemory;default:false;text-align:right;desc:used query cache"); + table.addCell("query_cache.evictions", "alias:qce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions"); - table.addCell("request_cache.memory_size", "alias:qcm,requestCacheMemory;default:false;text-align:right;desc:used request cache"); - table.addCell("request_cache.evictions", "alias:qce,requestCacheEvictions;default:false;text-align:right;desc:request cache evictions"); - table.addCell("request_cache.hit_count", "alias:qchc,requestCacheHitCount;default:false;text-align:right;desc:request cache hit counts"); - table.addCell("request_cache.miss_count", "alias:qcmc,requestCacheMissCount;default:false;text-align:right;desc:request cache miss counts"); + table.addCell("request_cache.memory_size", "alias:rcm,requestCacheMemory;default:false;text-align:right;desc:used request cache"); + table.addCell("request_cache.evictions", "alias:rce,requestCacheEvictions;default:false;text-align:right;desc:request cache evictions"); + table.addCell("request_cache.hit_count", "alias:rchc,requestCacheHitCount;default:false;text-align:right;desc:request cache hit counts"); + table.addCell("request_cache.miss_count", "alias:rcmc,requestCacheMissCount;default:false;text-align:right;desc:request cache miss counts"); table.addCell("flush.total", "alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes"); table.addCell("flush.total_time", "alias:ftt,flushTotalTime;default:false;text-align:right;desc:time spent in flush"); @@ -186,11 +184,7 @@ public class RestNodesAction extends 
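The _cat header cells above pack their attributes into a "key:value;key:value" string (sibling, alias, default, text-align, desc), which is why the alias fixes in this hunk matter. A sketch of reading one such descriptor, with the format inferred from the strings in this patch rather than from the Table implementation:

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Reading one cell descriptor of the form "key:value;key:value"; only the
    // first colon in each pair acts as the separator.
    final class CellDescriptorExample {
        public static void main(String[] args) {
            String spec = "alias:rcm,requestCacheMemory;default:false;text-align:right;desc:used request cache";
            Map<String, String> attrs = new LinkedHashMap<>();
            for (String pair : spec.split(";")) {
                int colon = pair.indexOf(':');
                attrs.put(pair.substring(0, colon), pair.substring(colon + 1));
            }
            System.out.println(attrs.get("alias")); // prints: rcm,requestCacheMemory
        }
    }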
AbstractCatAction { table.addCell("merges.total_size", "alias:mts,mergesTotalSize;default:false;text-align:right;desc:size merged"); table.addCell("merges.total_time", "alias:mtt,mergesTotalTime;default:false;text-align:right;desc:time spent in merges"); - table.addCell("percolate.current", "alias:pc,percolateCurrent;default:false;text-align:right;desc:number of current percolations"); - table.addCell("percolate.memory_size", "alias:pm,percolateMemory;default:false;text-align:right;desc:memory used by percolations"); table.addCell("percolate.queries", "alias:pq,percolateQueries;default:false;text-align:right;desc:number of registered percolation queries"); - table.addCell("percolate.time", "alias:pti,percolateTime;default:false;text-align:right;desc:time spent percolating"); - table.addCell("percolate.total", "alias:pto,percolateTotal;default:false;text-align:right;desc:total percolations"); table.addCell("refresh.total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes"); table.addCell("refresh.time", "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes"); @@ -336,12 +330,8 @@ public class RestNodesAction extends AbstractCatAction { table.addCell(mergeStats == null ? null : mergeStats.getTotalSize()); table.addCell(mergeStats == null ? null : mergeStats.getTotalTime()); - PercolateStats percolateStats = indicesStats == null ? null : indicesStats.getPercolate(); - table.addCell(percolateStats == null ? null : percolateStats.getCurrent()); - table.addCell(percolateStats == null ? null : percolateStats.getMemorySize()); - table.addCell(percolateStats == null ? null : percolateStats.getNumQueries()); - table.addCell(percolateStats == null ? null : percolateStats.getTime()); - table.addCell(percolateStats == null ? null : percolateStats.getCount()); + PercolatorQueryCacheStats percolatorQueryCacheStats = indicesStats == null ? null : indicesStats.getPercolate(); + table.addCell(percolatorQueryCacheStats == null ? null : percolatorQueryCacheStats.getNumQueries()); RefreshStats refreshStats = indicesStats == null ? null : indicesStats.getRefresh(); table.addCell(refreshStats == null ? null : refreshStats.getTotal()); @@ -371,10 +361,9 @@ public class RestNodesAction extends AbstractCatAction { table.addCell(segmentsStats == null ? null : segmentsStats.getVersionMapMemory()); table.addCell(segmentsStats == null ? null : segmentsStats.getBitsetMemory()); - SuggestStats suggestStats = indicesStats == null ? null : indicesStats.getSuggest(); - table.addCell(suggestStats == null ? null : suggestStats.getCurrent()); - table.addCell(suggestStats == null ? null : suggestStats.getTime()); - table.addCell(suggestStats == null ? null : suggestStats.getCount()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getSuggestCurrent()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getSuggestTime()); + table.addCell(searchStats == null ? 
null : searchStats.getTotal().getSuggestCount()); table.endRow(); } diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java index 759fac2eb19..7c555c9b357 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java @@ -92,14 +92,16 @@ public class RestRecoveryAction extends AbstractCatAction { .addCell("repository", "alias:rep;desc:repository") .addCell("snapshot", "alias:snap;desc:snapshot") .addCell("files", "alias:f;desc:number of files to recover") + .addCell("files_recovered", "alias:fr;desc:files recovered") .addCell("files_percent", "alias:fp;desc:percent of files recovered") - .addCell("bytes", "alias:b;desc:size to recover in bytes") + .addCell("files_total", "alias:tf;desc:total number of files") + .addCell("bytes", "alias:b;desc:number of bytes to recover") + .addCell("bytes_recovered", "alias:br;desc:bytes recovered") .addCell("bytes_percent", "alias:bp;desc:percent of bytes recovered") - .addCell("total_files", "alias:tf;desc:total number of files") - .addCell("total_bytes", "alias:tb;desc:total number of bytes") - .addCell("translog", "alias:tr;desc:translog operations recovered") - .addCell("translog_percent", "alias:trp;desc:percent of translog recovery") - .addCell("total_translog", "alias:trt;desc:current total translog operations") + .addCell("bytes_total", "alias:tb;desc:total number of bytes") + .addCell("translog_ops", "alias:to;desc:number of translog ops to recover") + .addCell("translog_ops_recovered", "alias:tor;desc:translog ops recovered") + .addCell("translog_ops_percent", "alias:top;desc:percent of translog ops recovered") .endHeaders(); return t; } @@ -151,14 +153,16 @@ public class RestRecoveryAction extends AbstractCatAction { t.addCell(state.getRestoreSource() == null ? "n/a" : state.getRestoreSource().snapshotId().getRepository()); t.addCell(state.getRestoreSource() == null ? 
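The same substitution runs through every cat handler in this commit: the standalone SuggestStats object is gone and the three suggest columns are now read off SearchStats. A minimal sketch of reading them from their new home; the wrapper class and method are hypothetical, only the SearchStats.Stats getters come from the diff above:

```java
import org.elasticsearch.index.search.stats.SearchStats;

// Hypothetical helper: renders the suggest columns the way the cat handlers now do.
final class SuggestStatsCells {
    static Object[] cells(SearchStats searchStats) {
        if (searchStats == null) {
            // cat tables render nulls as empty cells, mirroring the addCell(...) pattern
            return new Object[] { null, null, null };
        }
        SearchStats.Stats total = searchStats.getTotal();
        return new Object[] {
            total.getSuggestCurrent(), // suggest.current
            total.getSuggestTime(),    // suggest.time
            total.getSuggestCount()    // suggest.total
        };
    }
}
```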
"n/a" : state.getRestoreSource().snapshotId().getSnapshot()); t.addCell(state.getIndex().totalRecoverFiles()); + t.addCell(state.getIndex().recoveredFileCount()); t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getIndex().recoveredFilesPercent())); - t.addCell(state.getIndex().totalRecoverBytes()); - t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getIndex().recoveredBytesPercent())); t.addCell(state.getIndex().totalFileCount()); + t.addCell(state.getIndex().totalRecoverBytes()); + t.addCell(state.getIndex().recoveredBytes()); + t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getIndex().recoveredBytesPercent())); t.addCell(state.getIndex().totalBytes()); + t.addCell(state.getTranslog().totalOperations()); t.addCell(state.getTranslog().recoveredOperations()); t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getTranslog().recoveredPercent())); - t.addCell(state.getTranslog().totalOperations()); t.endRow(); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java index 94a82e8e773..8bf67653f6f 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java @@ -109,8 +109,8 @@ public class RestShardsAction extends AbstractCatAction { table.addCell("fielddata.memory_size", "alias:fm,fielddataMemory;default:false;text-align:right;desc:used fielddata cache"); table.addCell("fielddata.evictions", "alias:fe,fielddataEvictions;default:false;text-align:right;desc:fielddata evictions"); - table.addCell("query_cache.memory_size", "alias:fcm,queryCacheMemory;default:false;text-align:right;desc:used query cache"); - table.addCell("query_cache.evictions", "alias:fce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions"); + table.addCell("query_cache.memory_size", "alias:qcm,queryCacheMemory;default:false;text-align:right;desc:used query cache"); + table.addCell("query_cache.evictions", "alias:qce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions"); table.addCell("flush.total", "alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes"); table.addCell("flush.total_time", "alias:ftt,flushTotalTime;default:false;text-align:right;desc:time spent in flush"); @@ -139,11 +139,7 @@ public class RestShardsAction extends AbstractCatAction { table.addCell("merges.total_size", "alias:mts,mergesTotalSize;default:false;text-align:right;desc:size merged"); table.addCell("merges.total_time", "alias:mtt,mergesTotalTime;default:false;text-align:right;desc:time spent in merges"); - table.addCell("percolate.current", "alias:pc,percolateCurrent;default:false;text-align:right;desc:number of current percolations"); - table.addCell("percolate.memory_size", "alias:pm,percolateMemory;default:false;text-align:right;desc:memory used by percolations"); table.addCell("percolate.queries", "alias:pq,percolateQueries;default:false;text-align:right;desc:number of registered percolation queries"); - table.addCell("percolate.time", "alias:pti,percolateTime;default:false;text-align:right;desc:time spent percolating"); - table.addCell("percolate.total", "alias:pto,percolateTotal;default:false;text-align:right;desc:total percolations"); table.addCell("refresh.total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes"); table.addCell("refresh.time", "alias:rti,refreshTime;default:false;text-align:right;desc:time 
spent in refreshes"); @@ -191,7 +187,7 @@ public class RestShardsAction extends AbstractCatAction { table.addCell(shard.getIndexName()); table.addCell(shard.id()); - IndexMetaData indexMeta = state.getState().getMetaData().index(shard.index()); + IndexMetaData indexMeta = state.getState().getMetaData().getIndexSafe(shard.index()); boolean usesShadowReplicas = false; if (indexMeta != null) { usesShadowReplicas = IndexMetaData.isIndexUsingShadowReplicas(indexMeta.getSettings()); @@ -282,11 +278,7 @@ public class RestShardsAction extends AbstractCatAction { table.addCell(commonStats == null ? null : commonStats.getMerge().getTotalSize()); table.addCell(commonStats == null ? null : commonStats.getMerge().getTotalTime()); - table.addCell(commonStats == null ? null : commonStats.getPercolate().getCurrent()); - table.addCell(commonStats == null ? null : commonStats.getPercolate().getMemorySize()); - table.addCell(commonStats == null ? null : commonStats.getPercolate().getNumQueries()); - table.addCell(commonStats == null ? null : commonStats.getPercolate().getTime()); - table.addCell(commonStats == null ? null : commonStats.getPercolate().getCount()); + table.addCell(commonStats == null ? null : commonStats.getPercolatorCache().getNumQueries()); table.addCell(commonStats == null ? null : commonStats.getRefresh().getTotal()); table.addCell(commonStats == null ? null : commonStats.getRefresh().getTotalTime()); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java index fa2e662c738..b4ba4bbeffa 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java @@ -63,11 +63,9 @@ public class RestThreadPoolAction extends AbstractCatAction { ThreadPool.Names.INDEX, ThreadPool.Names.MANAGEMENT, ThreadPool.Names.FORCE_MERGE, - ThreadPool.Names.PERCOLATE, ThreadPool.Names.REFRESH, ThreadPool.Names.SEARCH, ThreadPool.Names.SNAPSHOT, - ThreadPool.Names.SUGGEST, ThreadPool.Names.WARMER }; @@ -79,11 +77,9 @@ public class RestThreadPoolAction extends AbstractCatAction { "i", "ma", "fm", - "p", "r", "s", "sn", - "su", "w" }; diff --git a/core/src/main/java/org/elasticsearch/rest/action/main/RestMainAction.java b/core/src/main/java/org/elasticsearch/rest/action/main/RestMainAction.java index bf3f0a3e5df..205bea92f96 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/main/RestMainAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/main/RestMainAction.java @@ -23,7 +23,7 @@ import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java index 5d9ac118831..10258aaaee4 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java @@ -44,6 +44,7 @@ import org.elasticsearch.rest.action.support.RestToXContentListener; import 
org.elasticsearch.script.Template; import org.elasticsearch.search.aggregations.AggregatorParsers; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.suggest.Suggesters; import java.util.Map; @@ -60,13 +61,14 @@ public class RestMultiSearchAction extends BaseRestHandler { private final boolean allowExplicitIndex; private final IndicesQueriesRegistry indicesQueriesRegistry; private final AggregatorParsers aggParsers; - + private final Suggesters suggesters; @Inject public RestMultiSearchAction(Settings settings, RestController controller, Client client, IndicesQueriesRegistry indicesQueriesRegistry, - AggregatorParsers aggParsers) { + AggregatorParsers aggParsers, Suggesters suggesters) { super(settings, client); this.aggParsers = aggParsers; + this.suggesters = suggesters; controller.registerHandler(GET, "/_msearch", this); controller.registerHandler(POST, "/_msearch", this); @@ -97,7 +99,7 @@ public class RestMultiSearchAction extends BaseRestHandler { IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, multiSearchRequest.indicesOptions()); parseRequest(multiSearchRequest, RestActions.getRestContent(request), isTemplateRequest, indices, types, request.param("search_type"), request.param("routing"), indicesOptions, allowExplicitIndex, indicesQueriesRegistry, - parseFieldMatcher, aggParsers); + parseFieldMatcher, aggParsers, suggesters); client.multiSearch(multiSearchRequest, new RestToXContentListener<>(channel)); } @@ -112,7 +114,8 @@ public class RestMultiSearchAction extends BaseRestHandler { @Nullable String routing, IndicesOptions indicesOptions, boolean allowExplicitIndex, IndicesQueriesRegistry indicesQueriesRegistry, - ParseFieldMatcher parseFieldMatcher, AggregatorParsers aggParsers) throws Exception { + ParseFieldMatcher parseFieldMatcher, AggregatorParsers aggParsers, + Suggesters suggesters) throws Exception { XContent xContent = XContentFactory.xContent(data); int from = 0; int length = data.length(); @@ -193,7 +196,7 @@ public class RestMultiSearchAction extends BaseRestHandler { } else { try (XContentParser requestParser = XContentFactory.xContent(slice).createParser(slice)) { queryParseContext.reset(requestParser); - searchRequest.source(SearchSourceBuilder.parseSearchSource(requestParser, queryParseContext, aggParsers)); + searchRequest.source(SearchSourceBuilder.parseSearchSource(requestParser, queryParseContext, aggParsers, suggesters)); } } // move pointers diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index d1cd09373f7..9d533d15ff2 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -48,6 +48,8 @@ import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.search.suggest.SuggestBuilder; +import org.elasticsearch.search.suggest.term.TermSuggestionBuilder.SuggestMode; +import org.elasticsearch.search.suggest.Suggesters; import java.io.IOException; import java.util.Arrays; @@ -64,13 +66,15 @@ public class RestSearchAction extends BaseRestHandler { private final IndicesQueriesRegistry queryRegistry; private final AggregatorParsers aggParsers; + private final Suggesters suggesters; @Inject public RestSearchAction(Settings settings, 
RestController controller, Client client, IndicesQueriesRegistry queryRegistry, - AggregatorParsers aggParsers) { + AggregatorParsers aggParsers, Suggesters suggesters) { super(settings, client); this.queryRegistry = queryRegistry; this.aggParsers = aggParsers; + this.suggesters = suggesters; controller.registerHandler(GET, "/_search", this); controller.registerHandler(POST, "/_search", this); controller.registerHandler(GET, "/{index}/_search", this); @@ -88,7 +92,7 @@ public class RestSearchAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws IOException { SearchRequest searchRequest = new SearchRequest(); - RestSearchAction.parseSearchRequest(searchRequest, queryRegistry, request, parseFieldMatcher, aggParsers, null); + parseSearchRequest(searchRequest, queryRegistry, request, parseFieldMatcher, aggParsers, suggesters, null); client.search(searchRequest, new RestStatusToXContentListener<>(channel)); } @@ -101,8 +105,10 @@ public class RestSearchAction extends BaseRestHandler { * content is read from the request using * RestAction.hasBodyContent. */ - public static void parseSearchRequest(SearchRequest searchRequest, IndicesQueriesRegistry indicesQueriesRegistry, RestRequest request, - ParseFieldMatcher parseFieldMatcher, AggregatorParsers aggParsers, BytesReference restContent) throws IOException { + public static void parseSearchRequest(SearchRequest searchRequest, IndicesQueriesRegistry indicesQueriesRegistry, RestRequest request, + ParseFieldMatcher parseFieldMatcher, AggregatorParsers aggParsers, Suggesters suggesters, BytesReference restContent) + throws IOException { + if (searchRequest.source() == null) { searchRequest.source(new SearchSourceBuilder()); } @@ -117,16 +123,15 @@ public class RestSearchAction extends BaseRestHandler { } if (restContent != null) { QueryParseContext context = new QueryParseContext(indicesQueriesRegistry); - if (isTemplateRequest) { - try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) { - context.reset(parser); - context.parseFieldMatcher(parseFieldMatcher); + try (XContentParser parser = XContentFactory.xContent(restContent).createParser(restContent)) { + context.reset(parser); + context.parseFieldMatcher(parseFieldMatcher); + if (isTemplateRequest) { Template template = TemplateQueryParser.parse(parser, context.parseFieldMatcher(), "params", "template"); searchRequest.template(template); + } else { + searchRequest.source().parseXContent(parser, context, aggParsers, suggesters); } - } else { - RestActions.parseRestSearchSource(searchRequest.source(), restContent, indicesQueriesRegistry, parseFieldMatcher, - aggParsers); } } @@ -254,8 +259,10 @@ public class RestSearchAction extends BaseRestHandler { String suggestText = request.param("suggest_text", request.param("q")); int suggestSize = request.paramAsInt("suggest_size", 5); String suggestMode = request.param("suggest_mode"); - searchSourceBuilder.suggest(new SuggestBuilder().addSuggestion( - termSuggestion(suggestField).field(suggestField).text(suggestText).size(suggestSize).suggestMode(suggestMode))); + searchSourceBuilder.suggest(new SuggestBuilder().addSuggestion(suggestField, + termSuggestion(suggestField) + .text(suggestText).size(suggestSize) + .suggestMode(SuggestMode.resolve(suggestMode)))); } } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java 
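The term-suggest wiring above shows the reworked suggest API in one place: a suggestion is registered on the SuggestBuilder under an explicit name, the field comes from the termSuggestion(...) factory rather than a field(...) setter, and the string mode is resolved to the SuggestMode enum. A small sketch of building an equivalent request body; it assumes the SuggestBuilders.termSuggestion static factory that RestSearchAction already statically imports, and the name "my-suggestion" is illustrative:

```java
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.search.suggest.term.TermSuggestionBuilder.SuggestMode;

import static org.elasticsearch.search.suggest.SuggestBuilders.termSuggestion;

final class SuggestApiSketch {
    static SearchSourceBuilder suggestSource() {
        // The suggestion name now lives on the SuggestBuilder, not inside the
        // suggestion itself; the target field is fixed by termSuggestion(field).
        return new SearchSourceBuilder().suggest(
            new SuggestBuilder().addSuggestion("my-suggestion",
                termSuggestion("user")                          // field to suggest on
                    .text("kimchy")                             // text to correct
                    .size(5)                                    // corrections per term
                    .suggestMode(SuggestMode.resolve("always")))); // string -> enum
    }
}
```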
diff --git a/core/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java b/core/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java
index 4e6b88b68b8..1bbc662929c 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java
@@ -19,14 +19,20 @@
 
 package org.elasticsearch.rest.action.suggest;
 
-import org.elasticsearch.action.suggest.SuggestRequest;
-import org.elasticsearch.action.suggest.SuggestResponse;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.indices.query.IndicesQueriesRegistry;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.BytesRestResponse;
 import org.elasticsearch.rest.RestChannel;
@@ -36,7 +42,12 @@
 import org.elasticsearch.rest.RestResponse;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.rest.action.support.RestActions;
 import org.elasticsearch.rest.action.support.RestBuilderListener;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.search.suggest.Suggest;
+import org.elasticsearch.search.suggest.SuggestBuilder;
+import org.elasticsearch.search.suggest.Suggesters;
+
+import java.io.IOException;
 
 import static org.elasticsearch.rest.RestRequest.Method.GET;
 import static org.elasticsearch.rest.RestRequest.Method.POST;
@@ -47,9 +58,15 @@ import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastSh
  */
 public class RestSuggestAction extends BaseRestHandler {
 
+    private final IndicesQueriesRegistry queryRegistry;
+    private final Suggesters suggesters;
+
     @Inject
-    public RestSuggestAction(Settings settings, RestController controller, Client client) {
+    public RestSuggestAction(Settings settings, RestController controller, Client client,
+                             IndicesQueriesRegistry queryRegistry, Suggesters suggesters) {
         super(settings, client);
+        this.queryRegistry = queryRegistry;
+        this.suggesters = suggesters;
         controller.registerHandler(POST, "/_suggest", this);
         controller.registerHandler(GET, "/_suggest", this);
         controller.registerHandler(POST, "/{index}/_suggest", this);
@@ -57,26 +74,33 @@ public class RestSuggestAction extends BaseRestHandler {
     }
 
     @Override
-    public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) {
-        SuggestRequest suggestRequest = new SuggestRequest(Strings.splitStringByCommaToArray(request.param("index")));
-        suggestRequest.indicesOptions(IndicesOptions.fromRequest(request, suggestRequest.indicesOptions()));
+    public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws IOException {
+        final SearchRequest searchRequest = new SearchRequest(Strings.splitStringByCommaToArray(request.param("index")), new SearchSourceBuilder());
+        searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions()));
         if (RestActions.hasBodyContent(request)) {
-            suggestRequest.suggest(RestActions.getRestContent(request));
+            final BytesReference sourceBytes = RestActions.getRestContent(request);
+            try (XContentParser parser = XContentFactory.xContent(sourceBytes).createParser(sourceBytes)) {
+                final QueryParseContext context = new QueryParseContext(queryRegistry);
+                context.reset(parser);
+                context.parseFieldMatcher(parseFieldMatcher);
+                searchRequest.source().suggest(SuggestBuilder.fromXContent(context, suggesters));
+            }
         } else {
             throw new IllegalArgumentException("no content or source provided to execute suggestion");
         }
-        suggestRequest.routing(request.param("routing"));
-        suggestRequest.preference(request.param("preference"));
-
-        client.suggest(suggestRequest, new RestBuilderListener<SuggestResponse>(channel) {
+        searchRequest.routing(request.param("routing"));
+        searchRequest.preference(request.param("preference"));
+        client.search(searchRequest, new RestBuilderListener<SearchResponse>(channel) {
             @Override
-            public RestResponse buildResponse(SuggestResponse response, XContentBuilder builder) throws Exception {
-                RestStatus restStatus = RestStatus.status(response.getSuccessfulShards(), response.getTotalShards(), response.getShardFailures());
+            public RestResponse buildResponse(SearchResponse response, XContentBuilder builder) throws Exception {
+                RestStatus restStatus = RestStatus.status(response.getSuccessfulShards(),
+                        response.getTotalShards(), response.getShardFailures());
                 builder.startObject();
-                buildBroadcastShardsHeader(builder, request, response);
+                buildBroadcastShardsHeader(builder, request, response.getTotalShards(),
+                        response.getSuccessfulShards(), response.getFailedShards(), response.getShardFailures());
                 Suggest suggest = response.getSuggest();
                 if (suggest != null) {
-                    suggest.toXContent(builder, request);
+                    suggest.toInnerXContent(builder, request);
                 }
                 builder.endObject();
                 return new BytesRestResponse(restStatus, builder);
diff --git a/core/src/main/java/org/elasticsearch/rest/action/support/RestActions.java b/core/src/main/java/org/elasticsearch/rest/action/support/RestActions.java
index 692a9dc3402..55063664343 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/support/RestActions.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/support/RestActions.java
@@ -42,6 +42,7 @@
 import org.elasticsearch.indices.query.IndicesQueriesRegistry;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.search.aggregations.AggregatorParsers;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.suggest.Suggesters;
 
 import java.io.IOException;
 
@@ -114,16 +115,6 @@ public class RestActions {
         return queryBuilder;
     }
 
-    public static void parseRestSearchSource(SearchSourceBuilder source, BytesReference sourceBytes, IndicesQueriesRegistry queryRegistry,
-                                             ParseFieldMatcher parseFieldMatcher, AggregatorParsers aggParsers)
-            throws IOException {
-        XContentParser parser = XContentFactory.xContent(sourceBytes).createParser(sourceBytes);
-        QueryParseContext queryParseContext = new QueryParseContext(queryRegistry);
-        queryParseContext.reset(parser);
-        queryParseContext.parseFieldMatcher(parseFieldMatcher);
-        source.parseXContent(parser, queryParseContext, aggParsers);
-    }
-
     /**
      * Get Rest content from either payload or source parameter
      * @param request Rest request
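With this change the _suggest endpoint becomes a thin facade over _search: the dedicated SuggestRequest/SuggestResponse pair disappears and a suggest-only SearchRequest is built instead. A sketch of the equivalent client-side construction; the index, suggestion name, field, and routing values are hypothetical:

```java
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.suggest.SuggestBuilder;

import static org.elasticsearch.search.suggest.SuggestBuilders.termSuggestion;

final class SuggestViaSearchSketch {
    // Mirrors what RestSuggestAction now builds internally: a search request whose
    // source carries only a suggest section, executed via client.search(...).
    static SearchRequest suggestOnlyRequest() {
        SearchRequest searchRequest =
            new SearchRequest(new String[] { "articles" }, new SearchSourceBuilder());
        searchRequest.source().suggest(new SuggestBuilder()
            .addSuggestion("did-you-mean", termSuggestion("title").text("elasticsaerch")));
        searchRequest.routing("user1"); // routing/preference still apply, as in the diff
        return searchRequest;
    }
}
```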
diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java
index 8e1ac1c8d77..90c617540fc 100644
--- a/core/src/main/java/org/elasticsearch/script/ScriptService.java
+++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java
@@ -46,6 +46,7 @@
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
@@ -55,7 +56,6 @@
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.query.TemplateQueryParser;
-import org.elasticsearch.search.internal.SearchContext;
 import org.elasticsearch.search.lookup.SearchLookup;
 import org.elasticsearch.watcher.FileChangesListener;
 import org.elasticsearch.watcher.FileWatcher;
@@ -84,10 +84,13 @@ public class ScriptService extends AbstractComponent implements Closeable {
 
     static final String DISABLE_DYNAMIC_SCRIPTING_SETTING = "script.disable_dynamic";
 
-    public static final Setting<Integer> SCRIPT_CACHE_SIZE_SETTING = Setting.intSetting("script.cache.max_size", 100, 0, false, Setting.Scope.CLUSTER);
-    public static final Setting<TimeValue> SCRIPT_CACHE_EXPIRE_SETTING = Setting.positiveTimeSetting("script.cache.expire", TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
+    public static final Setting<Integer> SCRIPT_CACHE_SIZE_SETTING =
+        Setting.intSetting("script.cache.max_size", 100, 0, Property.NodeScope);
+    public static final Setting<TimeValue> SCRIPT_CACHE_EXPIRE_SETTING =
+        Setting.positiveTimeSetting("script.cache.expire", TimeValue.timeValueMillis(0), Property.NodeScope);
     public static final String SCRIPT_INDEX = ".scripts";
-    public static final Setting<Boolean> SCRIPT_AUTO_RELOAD_ENABLED_SETTING = Setting.boolSetting("script.auto_reload_enabled", true, false, Setting.Scope.CLUSTER);
+    public static final Setting<Boolean> SCRIPT_AUTO_RELOAD_ENABLED_SETTING =
+        Setting.boolSetting("script.auto_reload_enabled", true, Property.NodeScope);
 
     private final String defaultLang;
 
@@ -225,6 +228,8 @@ public class ScriptService extends AbstractComponent implements Closeable {
         return scriptEngineService;
     }
 
+
+
     /**
      * Checks if a script can be executed and compiles it if needed, or returns the previously compiled and cached script.
     */
@@ -516,46 +521,53 @@ public class ScriptService extends AbstractComponent implements Closeable {
 
     private class ScriptChangesListener extends FileChangesListener {
 
-        private Tuple<String, String> scriptNameExt(Path file) {
+        private Tuple<String, String> getScriptNameExt(Path file) {
             Path scriptPath = scriptsDirectory.relativize(file);
             int extIndex = scriptPath.toString().lastIndexOf('.');
-            if (extIndex != -1) {
-                String ext = scriptPath.toString().substring(extIndex + 1);
-                String scriptName = scriptPath.toString().substring(0, extIndex).replace(scriptPath.getFileSystem().getSeparator(), "_");
-                return new Tuple<>(scriptName, ext);
-            } else {
+            if (extIndex <= 0) {
                 return null;
             }
+
+            String ext = scriptPath.toString().substring(extIndex + 1);
+            if (ext.isEmpty()) {
+                return null;
+            }
+
+            String scriptName = scriptPath.toString().substring(0, extIndex).replace(scriptPath.getFileSystem().getSeparator(), "_");
+            return new Tuple<>(scriptName, ext);
         }
 
         @Override
         public void onFileInit(Path file) {
+            Tuple<String, String> scriptNameExt = getScriptNameExt(file);
+            if (scriptNameExt == null) {
+                logger.debug("Skipped script with invalid extension : [{}]", file);
+                return;
+            }
             if (logger.isTraceEnabled()) {
                 logger.trace("Loading script file : [{}]", file);
             }
-            Tuple<String, String> scriptNameExt = scriptNameExt(file);
-            if (scriptNameExt != null) {
-                ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2());
-                if (engineService == null) {
-                    logger.warn("no script engine found for [{}]", scriptNameExt.v2());
-                } else {
-                    try {
-                        //we don't know yet what the script will be used for, but if all of the operations for this lang
-                        // with file scripts are disabled, it makes no sense to even compile it and cache it.
-                        if (isAnyScriptContextEnabled(engineService.getTypes().get(0), engineService, ScriptType.FILE)) {
-                            logger.info("compiling script file [{}]", file.toAbsolutePath());
-                            try(InputStreamReader reader = new InputStreamReader(Files.newInputStream(file), StandardCharsets.UTF_8)) {
-                                String script = Streams.copyToString(reader);
-                                CacheKey cacheKey = new CacheKey(engineService, scriptNameExt.v1(), null, Collections.emptyMap());
-                                staticCache.put(cacheKey, new CompiledScript(ScriptType.FILE, scriptNameExt.v1(), engineService.getTypes().get(0), engineService.compile(script, Collections.emptyMap())));
-                                scriptMetrics.onCompilation();
-                            }
-                        } else {
-                            logger.warn("skipping compile of script file [{}] as all scripted operations are disabled for file scripts", file.toAbsolutePath());
+
+            ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2());
+            if (engineService == null) {
+                logger.warn("No script engine found for [{}]", scriptNameExt.v2());
+            } else {
+                try {
+                    //we don't know yet what the script will be used for, but if all of the operations for this lang
+                    // with file scripts are disabled, it makes no sense to even compile it and cache it.
+                    if (isAnyScriptContextEnabled(engineService.getTypes().get(0), engineService, ScriptType.FILE)) {
+                        logger.info("compiling script file [{}]", file.toAbsolutePath());
+                        try (InputStreamReader reader = new InputStreamReader(Files.newInputStream(file), StandardCharsets.UTF_8)) {
+                            String script = Streams.copyToString(reader);
+                            CacheKey cacheKey = new CacheKey(engineService, scriptNameExt.v1(), null, Collections.emptyMap());
+                            staticCache.put(cacheKey, new CompiledScript(ScriptType.FILE, scriptNameExt.v1(), engineService.getTypes().get(0), engineService.compile(script, Collections.emptyMap())));
+                            scriptMetrics.onCompilation();
                        }
-                    } catch (Throwable e) {
-                        logger.warn("failed to load/compile script [{}]", e, scriptNameExt.v1());
+                    } else {
+                        logger.warn("skipping compile of script file [{}] as all scripted operations are disabled for file scripts", file.toAbsolutePath());
                     }
+                } catch (Throwable e) {
+                    logger.warn("failed to load/compile script [{}]", e, scriptNameExt.v1());
                 }
             }
         }
@@ -567,7 +579,7 @@ public class ScriptService extends AbstractComponent implements Closeable {
 
         @Override
         public void onFileDeleted(Path file) {
-            Tuple<String, String> scriptNameExt = scriptNameExt(file);
+            Tuple<String, String> scriptNameExt = getScriptNameExt(file);
             if (scriptNameExt != null) {
                 ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2());
                 assert engineService != null;
diff --git a/core/src/main/java/org/elasticsearch/script/ScriptSettings.java b/core/src/main/java/org/elasticsearch/script/ScriptSettings.java
index 8ececfe25bb..1bf7fdfc843 100644
--- a/core/src/main/java/org/elasticsearch/script/ScriptSettings.java
+++ b/core/src/main/java/org/elasticsearch/script/ScriptSettings.java
@@ -21,6 +21,7 @@
 package org.elasticsearch.script;
 
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 
 import java.util.ArrayList;
@@ -44,8 +45,7 @@ public class ScriptSettings {
                     ScriptModes.sourceKey(scriptType),
                     scriptType.getDefaultScriptMode().getMode(),
                     ScriptMode::parse,
-                    false,
-                    Setting.Scope.CLUSTER));
+                    Property.NodeScope));
         }
         SCRIPT_TYPE_SETTING_MAP = Collections.unmodifiableMap(scriptTypeSettingMap);
     }
@@ -66,7 +66,7 @@ public class ScriptSettings {
                 throw new IllegalArgumentException("unregistered default language [" + setting + "]");
             }
             return setting;
-        }, false, Setting.Scope.CLUSTER);
+        }, Property.NodeScope);
     }
 
     private static Map<ScriptContext, Setting<ScriptMode>> contextSettings(ScriptContextRegistry scriptContextRegistry) {
@@ -76,8 +76,7 @@ public class ScriptSettings {
                     ScriptModes.operationKey(scriptContext),
                     ScriptMode.OFF.getMode(),
                     ScriptMode::parse,
-                    false,
-                    Setting.Scope.CLUSTER
+                    Property.NodeScope
             ));
         }
         return scriptContextSettingMap;
@@ -137,8 +136,7 @@ public class ScriptSettings {
                             ScriptModes.getKey(language, scriptType, scriptContext),
                             defaultSetting,
                            ScriptMode::parse,
-                            false,
-                            Setting.Scope.CLUSTER);
+                            Property.NodeScope);
                     scriptModeSettings.add(setting);
                 }
             }
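Both script files above follow the same Setting API migration that repeats throughout this commit: the trailing dynamic boolean plus Setting.Scope.CLUSTER are folded into varargs Setting.Property flags. A compact sketch using two declarations taken from the diff; only the holder class is hypothetical, and the comments note the old form each line replaces:

```java
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.unit.TimeValue;

final class SettingMigrationSketch {
    // Old: Setting.intSetting("script.cache.max_size", 100, 0, false, Setting.Scope.CLUSTER)
    static final Setting<Integer> SCRIPT_CACHE_SIZE =
        Setting.intSetting("script.cache.max_size", 100, 0, Property.NodeScope);

    // Old: Setting.timeSetting("search.default_search_timeout", NO_TIMEOUT, true, Setting.Scope.CLUSTER);
    // Property.Dynamic replaces the boolean, Property.NodeScope replaces Scope.CLUSTER.
    static final Setting<TimeValue> DEFAULT_SEARCH_TIMEOUT =
        Setting.timeSetting("search.default_search_timeout",
            TimeValue.timeValueMillis(-1), Property.Dynamic, Property.NodeScope);
}
```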
diff --git a/core/src/main/java/org/elasticsearch/search/SearchException.java b/core/src/main/java/org/elasticsearch/search/SearchException.java
index 0d181cc1dce..535f8acd446 100644
--- a/core/src/main/java/org/elasticsearch/search/SearchException.java
+++ b/core/src/main/java/org/elasticsearch/search/SearchException.java
@@ -45,7 +45,7 @@ public class SearchException extends ElasticsearchException implements Elasticse
     public SearchException(StreamInput in) throws IOException {
         super(in);
         if (in.readBoolean()) {
-            shardTarget = SearchShardTarget.readSearchShardTarget(in);
+            shardTarget = new SearchShardTarget(in);
         } else {
             shardTarget = null;
         }
@@ -54,7 +54,12 @@ public class SearchException extends ElasticsearchException implements Elasticse
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        out.writeOptionalStreamable(shardTarget);
+        if (shardTarget == null) {
+            out.writeBoolean(false);
+        } else {
+            out.writeBoolean(true);
+            shardTarget.writeTo(out);
+        }
     }
 
     public SearchShardTarget shard() {
diff --git a/core/src/main/java/org/elasticsearch/search/SearchModule.java b/core/src/main/java/org/elasticsearch/search/SearchModule.java
index 0d4faca3672..8756a31c444 100644
--- a/core/src/main/java/org/elasticsearch/search/SearchModule.java
+++ b/core/src/main/java/org/elasticsearch/search/SearchModule.java
@@ -21,21 +21,13 @@
 package org.elasticsearch.search;
 
 import org.apache.lucene.search.BooleanQuery;
 import org.elasticsearch.common.geo.ShapesAvailability;
-import org.elasticsearch.common.geo.builders.CircleBuilder;
-import org.elasticsearch.common.geo.builders.EnvelopeBuilder;
-import org.elasticsearch.common.geo.builders.GeometryCollectionBuilder;
-import org.elasticsearch.common.geo.builders.LineStringBuilder;
-import org.elasticsearch.common.geo.builders.MultiLineStringBuilder;
-import org.elasticsearch.common.geo.builders.MultiPointBuilder;
-import org.elasticsearch.common.geo.builders.MultiPolygonBuilder;
-import org.elasticsearch.common.geo.builders.PointBuilder;
-import org.elasticsearch.common.geo.builders.PolygonBuilder;
-import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.geo.builders.ShapeBuilders;
 import org.elasticsearch.common.inject.AbstractModule;
 import org.elasticsearch.common.inject.multibindings.Multibinder;
 import org.elasticsearch.common.io.stream.NamedWriteable;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.percolator.PercolatorHighlightSubFetchPhase;
 import org.elasticsearch.index.query.BoolQueryParser;
 import org.elasticsearch.index.query.BoostingQueryParser;
 import org.elasticsearch.index.query.CommonTermsQueryParser;
@@ -62,6 +54,7 @@
 import org.elasticsearch.index.query.MoreLikeThisQueryParser;
 import org.elasticsearch.index.query.MultiMatchQueryParser;
 import org.elasticsearch.index.query.NestedQueryParser;
 import org.elasticsearch.index.query.ParentIdQueryParser;
+import org.elasticsearch.index.query.PercolatorQueryParser;
 import org.elasticsearch.index.query.PrefixQueryParser;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.QueryParser;
@@ -224,8 +217,21 @@
 import org.elasticsearch.search.highlight.Highlighters;
 import org.elasticsearch.search.query.QueryPhase;
 import org.elasticsearch.search.rescore.QueryRescorerBuilder;
 import org.elasticsearch.search.rescore.RescoreBuilder;
+import org.elasticsearch.search.sort.FieldSortBuilder;
+import org.elasticsearch.search.sort.GeoDistanceSortBuilder;
+import org.elasticsearch.search.sort.ScoreSortBuilder;
+import org.elasticsearch.search.sort.ScriptSortBuilder;
+import org.elasticsearch.search.sort.SortBuilder;
 import org.elasticsearch.search.suggest.Suggester;
 import org.elasticsearch.search.suggest.Suggesters;
+import org.elasticsearch.search.suggest.SuggestionBuilder;
+import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder;
+import org.elasticsearch.search.suggest.phrase.Laplace;
+import org.elasticsearch.search.suggest.phrase.LinearInterpolation;
+import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder;
+import org.elasticsearch.search.suggest.phrase.SmoothingModel;
+import org.elasticsearch.search.suggest.phrase.StupidBackoff;
+import org.elasticsearch.search.suggest.term.TermSuggestionBuilder;
 
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -271,14 +277,17 @@ public class SearchModule extends AbstractModule {
 
         registerBuiltinFunctionScoreParsers();
         registerBuiltinQueryParsers();
+        registerBuiltinRescorers();
+        registerBuiltinSorts();
     }
 
     public void registerHighlighter(String key, Class<? extends Highlighter> clazz) {
         highlighters.registerExtension(key, clazz);
     }
 
-    public void registerSuggester(String key, Class<? extends Suggester> suggester) {
-        suggesters.registerExtension(key, suggester);
+    public void registerSuggester(String key, Suggester suggester) {
+        suggesters.registerExtension(key, suggester.getClass());
+        namedWriteableRegistry.registerPrototype(SuggestionBuilder.class, suggester.getBuilderPrototype());
     }
 
     /**
@@ -334,7 +343,6 @@ public class SearchModule extends AbstractModule {
         configureSuggesters();
         configureFetchSubPhase();
         configureShapes();
-        configureRescorers();
     }
 
     protected void configureFetchSubPhase() {
@@ -347,6 +355,7 @@ public class SearchModule extends AbstractModule {
         fetchSubPhaseMultibinder.addBinding().to(MatchedQueriesFetchSubPhase.class);
         fetchSubPhaseMultibinder.addBinding().to(HighlightPhase.class);
         fetchSubPhaseMultibinder.addBinding().to(ParentFieldSubFetchPhase.class);
+        fetchSubPhaseMultibinder.addBinding().to(PercolatorHighlightSubFetchPhase.class);
         for (Class<? extends FetchSubPhase> clazz : fetchSubPhases) {
             fetchSubPhaseMultibinder.addBinding().to(clazz);
         }
@@ -371,6 +380,12 @@ public class SearchModule extends AbstractModule {
 
     protected void configureSuggesters() {
         suggesters.bind(binder());
+        namedWriteableRegistry.registerPrototype(SuggestionBuilder.class, TermSuggestionBuilder.PROTOTYPE);
+        namedWriteableRegistry.registerPrototype(SuggestionBuilder.class, PhraseSuggestionBuilder.PROTOTYPE);
+        namedWriteableRegistry.registerPrototype(SuggestionBuilder.class, CompletionSuggestionBuilder.PROTOTYPE);
+        namedWriteableRegistry.registerPrototype(SmoothingModel.class, Laplace.PROTOTYPE);
+        namedWriteableRegistry.registerPrototype(SmoothingModel.class, LinearInterpolation.PROTOTYPE);
+        namedWriteableRegistry.registerPrototype(SmoothingModel.class, StupidBackoff.PROTOTYPE);
     }
 
     protected void configureHighlighters() {
@@ -455,20 +470,19 @@ public class SearchModule extends AbstractModule {
 
     private void configureShapes() {
         if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
-            namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PointBuilder.PROTOTYPE);
-            namedWriteableRegistry.registerPrototype(ShapeBuilder.class, CircleBuilder.PROTOTYPE);
-            namedWriteableRegistry.registerPrototype(ShapeBuilder.class, EnvelopeBuilder.PROTOTYPE);
-            namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPointBuilder.PROTOTYPE);
-            namedWriteableRegistry.registerPrototype(ShapeBuilder.class, LineStringBuilder.PROTOTYPE);
-            namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiLineStringBuilder.PROTOTYPE);
-            namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PolygonBuilder.PROTOTYPE);
-            namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPolygonBuilder.PROTOTYPE);
-            namedWriteableRegistry.registerPrototype(ShapeBuilder.class, GeometryCollectionBuilder.PROTOTYPE);
+            ShapeBuilders.register(namedWriteableRegistry);
         }
     }
 
-    private void configureRescorers() {
-        namedWriteableRegistry.registerPrototype(RescoreBuilder.class, QueryRescorerBuilder.PROTOTYPE);
+    private void registerBuiltinRescorers() {
+        namedWriteableRegistry.register(RescoreBuilder.class, QueryRescorerBuilder.NAME, QueryRescorerBuilder::new);
+    }
+
+    private void registerBuiltinSorts() {
+        namedWriteableRegistry.register(SortBuilder.class, GeoDistanceSortBuilder.NAME, GeoDistanceSortBuilder::new);
+        namedWriteableRegistry.register(SortBuilder.class, ScoreSortBuilder.NAME, ScoreSortBuilder::new);
+        namedWriteableRegistry.register(SortBuilder.class, ScriptSortBuilder.NAME, ScriptSortBuilder::new);
+        namedWriteableRegistry.register(SortBuilder.class, FieldSortBuilder.NAME, FieldSortBuilder::new);
     }
 
     private void registerBuiltinFunctionScoreParsers() {
@@ -531,6 +545,7 @@ public class SearchModule extends AbstractModule {
         registerQueryParser(ExistsQueryParser::new);
         registerQueryParser(MatchNoneQueryParser::new);
         registerQueryParser(ParentIdQueryParser::new);
+        registerQueryParser(PercolatorQueryParser::new);
         if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
             registerQueryParser(GeoShapeQueryParser::new);
         }
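registerBuiltinRescorers() and registerBuiltinSorts() above show the NamedWriteableRegistry moving from prototype-based to reader-based registration: instead of a shared PROTOTYPE instance that is asked to readFrom(in), the registry stores a stable NAME key and a reader, here a StreamInput constructor reference. A sketch of the new call shape; the registration lines are taken from the diff, only the wrapper class is hypothetical:

```java
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.search.sort.FieldSortBuilder;
import org.elasticsearch.search.sort.ScoreSortBuilder;
import org.elasticsearch.search.sort.SortBuilder;

final class SortRegistrationsSketch {
    static void register(NamedWriteableRegistry registry) {
        // category class + wire name + StreamInput-constructor reference
        registry.register(SortBuilder.class, FieldSortBuilder.NAME, FieldSortBuilder::new);
        registry.register(SortBuilder.class, ScoreSortBuilder.NAME, ScoreSortBuilder::new);
    }
}
```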
diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java
index eb3568296ad..98085bfed35 100644
--- a/core/src/main/java/org/elasticsearch/search/SearchService.java
+++ b/core/src/main/java/org/elasticsearch/search/SearchService.java
@@ -21,11 +21,12 @@
 package org.elasticsearch.search;
 
 import com.carrotsearch.hppc.ObjectFloatHashMap;
 import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.TopDocs;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.cache.recycler.PageCacheRecycler;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -34,19 +35,20 @@
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.common.util.concurrent.ConcurrentMapLong;
 import org.elasticsearch.common.util.concurrent.FutureUtils;
-import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentLocation;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.fieldstats.FieldStatsProvider;
 import org.elasticsearch.index.query.QueryParseContext;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.search.stats.ShardSearchStats;
@@ -54,7 +56,6 @@
 import org.elasticsearch.index.search.stats.StatsGroupsParseElement;
 import org.elasticsearch.index.shard.IndexEventListener;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.indices.IndicesService;
-import org.elasticsearch.indices.IndicesRequestCache;
 import org.elasticsearch.script.ExecutableScript;
 import org.elasticsearch.script.ScriptContext;
 import org.elasticsearch.script.ScriptService;
@@ -91,12 +92,15 @@
 import org.elasticsearch.search.query.QuerySearchResultProvider;
 import org.elasticsearch.search.query.ScrollQuerySearchResult;
 import org.elasticsearch.search.rescore.RescoreBuilder;
 import org.elasticsearch.search.searchafter.SearchAfterBuilder;
+import org.elasticsearch.search.sort.SortBuilder;
+import org.elasticsearch.search.suggest.Suggesters;
 import org.elasticsearch.threadpool.ThreadPool;
 
 import java.io.IOException;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Optional;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.atomic.AtomicLong;
@@ -111,11 +115,14 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
 public class SearchService extends AbstractLifecycleComponent<SearchService> implements IndexEventListener {
 
     // we can have 5 minutes here, since we make sure to clean with search requests and when shard/index closes
-    public static final Setting<TimeValue> DEFAULT_KEEPALIVE_SETTING = Setting.positiveTimeSetting("search.default_keep_alive", timeValueMinutes(5), false, Setting.Scope.CLUSTER);
-    public static final Setting<TimeValue> KEEPALIVE_INTERVAL_SETTING = Setting.positiveTimeSetting("search.keep_alive_interval", timeValueMinutes(1), false, Setting.Scope.CLUSTER);
+    public static final Setting<TimeValue> DEFAULT_KEEPALIVE_SETTING =
+        Setting.positiveTimeSetting("search.default_keep_alive", timeValueMinutes(5), Property.NodeScope);
+    public static final Setting<TimeValue> KEEPALIVE_INTERVAL_SETTING =
+        Setting.positiveTimeSetting("search.keep_alive_interval", timeValueMinutes(1), Property.NodeScope);
 
     public static final TimeValue NO_TIMEOUT = timeValueMillis(-1);
-    public static final Setting<TimeValue> DEFAULT_SEARCH_TIMEOUT_SETTING = Setting.timeSetting("search.default_search_timeout", NO_TIMEOUT, true, Setting.Scope.CLUSTER);
+    public static final Setting<TimeValue> DEFAULT_SEARCH_TIMEOUT_SETTING =
+        Setting.timeSetting("search.default_search_timeout", NO_TIMEOUT, Property.Dynamic, Property.NodeScope);
 
     private final ThreadPool threadPool;
 
@@ -149,14 +156,16 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
 
     private final Map<String, SearchParseElement> elementParsers;
 
     private final ParseFieldMatcher parseFieldMatcher;
 
-    private AggregatorParsers aggParsers;
+    private final AggregatorParsers aggParsers;
+    private final Suggesters suggesters;
 
     @Inject
     public SearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService, IndicesService indicesService,
-            ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, DfsPhase dfsPhase,
-            QueryPhase queryPhase, FetchPhase fetchPhase, AggregatorParsers aggParsers) {
+            ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, DfsPhase dfsPhase,
+            QueryPhase queryPhase, FetchPhase fetchPhase, AggregatorParsers aggParsers, Suggesters suggesters) {
         super(settings);
         this.aggParsers = aggParsers;
+        this.suggesters = suggesters;
         this.parseFieldMatcher = new ParseFieldMatcher(settings);
         this.threadPool = threadPool;
         this.clusterService = clusterService;
@@ -192,7 +201,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
         public void afterIndexClosed(Index index, Settings indexSettings) {
             // once an index is closed we can just clean up all the pending search context information
             // to release memory and let references to the filesystem go etc.
-            IndexMetaData idxMeta = SearchService.this.clusterService.state().metaData().index(index.getName());
+            IndexMetaData idxMeta = SearchService.this.clusterService.state().metaData().index(index);
             if (idxMeta != null && idxMeta.getState() == IndexMetaData.State.CLOSE) {
                 // we need to check if it's really closed
                 // since sometimes due to a relocation we already closed the shard and that causes the index to be closed
@@ -234,7 +243,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
         FutureUtils.cancel(keepAliveReaper);
     }
 
-    public DfsSearchResult executeDfsPhase(ShardSearchRequest request) {
+    public DfsSearchResult executeDfsPhase(ShardSearchRequest request) throws IOException {
         final SearchContext context = createAndPutContext(request);
         try {
             contextProcessing(context);
@@ -263,7 +272,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
         }
     }
 
-    public QuerySearchResultProvider executeQueryPhase(ShardSearchRequest request) {
+    public QuerySearchResultProvider executeQueryPhase(ShardSearchRequest request) throws IOException {
         final SearchContext context = createAndPutContext(request);
         final ShardSearchStats shardSearchStats = context.indexShard().searchService();
         try {
@@ -355,7 +364,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
         }
     }
 
-    public QueryFetchSearchResult executeFetchPhase(ShardSearchRequest request) {
+    public QueryFetchSearchResult executeFetchPhase(ShardSearchRequest request) throws IOException {
         final SearchContext context = createAndPutContext(request);
         contextProcessing(context);
         try {
@@ -512,7 +521,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
         return context;
     }
 
-    final SearchContext createAndPutContext(ShardSearchRequest request) {
+    final SearchContext createAndPutContext(ShardSearchRequest request) throws IOException {
         SearchContext context = createContext(request, null);
         boolean success = false;
         try {
@@ -530,11 +539,10 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
         }
     }
 
-    final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) {
-        IndexService indexService = indicesService.indexServiceSafe(request.index());
-        IndexShard indexShard = indexService.getShard(request.shardId());
-
-        SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), indexShard.shardId().getIndex(), request.shardId());
+    final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) throws IOException {
+        IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
+        IndexShard indexShard = indexService.getShard(request.shardId().getId());
+        SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), indexShard.shardId());
 
         Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher;
@@ -542,7 +550,15 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
                 indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(),
                 parseFieldMatcher, defaultSearchTimeout, fetchPhase);
+        context.getQueryShardContext().setFieldStatsProvider(new FieldStatsProvider(engineSearcher, indexService.mapperService()));
         SearchContext.setCurrent(context);
+        request.rewrite(context.getQueryShardContext());
+        // reset that we have used nowInMillis from the context since it may
+        // have been rewritten so its no longer in the query and the request can
+        // be cached. If it is still present in the request (e.g. in a range
+        // aggregation) it will still be caught when the aggregation is
+        // evaluated.
+        context.resetNowInMillisUsed();
         try {
             if (request.scroll() != null) {
                 context.scrollContext(new ScrollContext());
@@ -555,7 +571,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
                     QueryParseContext queryParseContext = new QueryParseContext(indicesService.getIndicesQueryRegistry());
                     queryParseContext.reset(parser);
                     queryParseContext.parseFieldMatcher(parseFieldMatcher);
-                    parseSource(context, SearchSourceBuilder.parseSearchSource(parser, queryParseContext, aggParsers));
+                    parseSource(context, SearchSourceBuilder.parseSearchSource(parser, queryParseContext, aggParsers, suggesters));
                 }
             }
             parseSource(context, request.source());
@@ -669,33 +685,13 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
             context.parsedPostFilter(queryShardContext.toQuery(source.postFilter()));
         }
         if (source.sorts() != null) {
-            XContentParser completeSortParser = null;
             try {
-                XContentBuilder completeSortBuilder = XContentFactory.jsonBuilder();
-                completeSortBuilder.startObject();
-                completeSortBuilder.startArray("sort");
-                for (BytesReference sort : source.sorts()) {
-                    XContentParser parser = XContentFactory.xContent(sort).createParser(sort);
-                    parser.nextToken();
-                    completeSortBuilder.copyCurrentStructure(parser);
+                Optional<Sort> optionalSort = SortBuilder.buildSort(source.sorts(), context.getQueryShardContext());
+                if (optionalSort.isPresent()) {
+                    context.sort(optionalSort.get());
                 }
-                completeSortBuilder.endArray();
-                completeSortBuilder.endObject();
-                BytesReference completeSortBytes = completeSortBuilder.bytes();
-                completeSortParser = XContentFactory.xContent(completeSortBytes).createParser(completeSortBytes);
-                completeSortParser.nextToken();
-                completeSortParser.nextToken();
-                completeSortParser.nextToken();
-                this.elementParsers.get("sort").parse(completeSortParser, context);
-            } catch (Exception e) {
-                String sSource = "_na_";
-                try {
-                    sSource = source.toString();
-                } catch (Throwable e1) {
-                    // ignore
-                }
-                XContentLocation location = completeSortParser != null ? completeSortParser.getTokenLocation() : null;
-                throw new SearchParseException(context, "failed to parse sort source [" + sSource + "]", location, e);
+            } catch (IOException e) {
+                throw new SearchContextException(context, "failed to create sort elements", e);
             }
         }
         context.trackScores(source.trackScores());
@@ -718,26 +714,16 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
             }
         }
         if (source.suggest() != null) {
-            XContentParser suggestParser = null;
             try {
-                suggestParser = XContentFactory.xContent(source.suggest()).createParser(source.suggest());
-                suggestParser.nextToken();
-                this.elementParsers.get("suggest").parse(suggestParser, context);
-            } catch (Exception e) {
-                String sSource = "_na_";
-                try {
-                    sSource = source.toString();
-                } catch (Throwable e1) {
-                    // ignore
-                }
-                XContentLocation location = suggestParser != null ? suggestParser.getTokenLocation() : null;
-                throw new SearchParseException(context, "failed to parse suggest source [" + sSource + "]", location, e);
+                context.suggest(source.suggest().build(queryShardContext));
+            } catch (IOException e) {
+                throw new SearchContextException(context, "failed to create SuggestionSearchContext", e);
             }
         }
         if (source.rescores() != null) {
             try {
                 for (RescoreBuilder<?> rescore : source.rescores()) {
-                    context.addRescore(rescore.build(context.getQueryShardContext()));
+                    context.addRescore(rescore.build(queryShardContext));
                 }
             } catch (IOException e) {
                 throw new SearchContextException(context, "failed to create RescoreSearchContext", e);
             }
@@ -762,7 +748,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
         if (source.highlighter() != null) {
             HighlightBuilder highlightBuilder = source.highlighter();
             try {
-                context.highlight(highlightBuilder.build(context.getQueryShardContext()));
+                context.highlight(highlightBuilder.build(queryShardContext));
             } catch (IOException e) {
                 throw new SearchContextException(context, "failed to create SearchContextHighlighter", e);
             }
@@ -802,6 +788,11 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
                 } else {
                     SearchParseElement parseElement = this.elementParsers.get(currentFieldName);
                     if (parseElement == null) {
+                        if (currentFieldName != null && currentFieldName.equals("suggest")) {
+                            throw new SearchParseException(context,
+                                "suggest is not supported in [ext], please use SearchSourceBuilder#suggest(SuggestBuilder) instead",
+                                extParser.getTokenLocation());
+                        }
                         throw new SearchParseException(context, "Unknown element [" + currentFieldName + "] in [ext]",
                                 extParser.getTokenLocation());
                     } else {
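The sort path in parseSource() no longer round-trips through XContent: SortBuilder.buildSort(...) compiles the builders directly to a Lucene Sort, and an empty Optional means no explicit sort was requested, so score ordering stays in effect. A sketch of that flow, assuming the buildSort signature exactly as used in the diff; the wrapper class is hypothetical:

```java
import java.io.IOException;
import java.util.List;
import java.util.Optional;

import org.apache.lucene.search.Sort;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.sort.SortBuilder;

final class SortCompilationSketch {
    static void applySort(SearchContext context, List<SortBuilder<?>> sorts) throws IOException {
        // One step from builders to a Lucene Sort; no JSON rebuild, no SortParseElement.
        Optional<Sort> optionalSort = SortBuilder.buildSort(sorts, context.getQueryShardContext());
        if (optionalSort.isPresent()) {
            context.sort(optionalSort.get()); // absent => default score ordering
        }
    }
}
```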
*/ -public class SearchShardTarget implements Streamable, Comparable { +public class SearchShardTarget implements Writeable, Comparable { private Text nodeId; private Text index; - private int shardId; + private ShardId shardId; - private SearchShardTarget() { + public SearchShardTarget(StreamInput in) throws IOException { + if (in.readBoolean()) { + nodeId = in.readText(); + } + shardId = ShardId.readShardId(in); + index = new Text(shardId.getIndexName()); + } + public SearchShardTarget(String nodeId, ShardId shardId) { + this.nodeId = nodeId == null ? null : new Text(nodeId); + this.index = new Text(shardId.getIndexName()); + this.shardId = shardId; } public SearchShardTarget(String nodeId, Index index, int shardId) { - this.nodeId = nodeId == null ? null : new Text(nodeId); - this.index = new Text(index.getName()); - this.shardId = shardId; + this(nodeId, new ShardId(index, shardId)); } @Nullable @@ -73,36 +83,26 @@ public class SearchShardTarget implements Streamable, Comparable sorts = null; + private List> sorts = null; private HighlightBuilder highlightBuilder; private List fieldNames; private List fieldDataFields; @@ -119,6 +116,9 @@ public class TopHitsAggregatorBuilder extends AggregatorBuilder sort) { if (sort == null) { throw new IllegalArgumentException("[sort] must not be null: [" + name + "]"); } - try { - if (sorts == null) { + if (sorts == null) { sorts = new ArrayList<>(); - } - // NORELEASE when sort has been refactored and made writeable - // add the sortBuilcer to the List directly instead of - // serialising to XContent - XContentBuilder builder = XContentFactory.jsonBuilder(); - builder.startObject(); - sort.toXContent(builder, EMPTY_PARAMS); - builder.endObject(); - sorts.add(builder.bytes()); - } catch (IOException e) { - throw new RuntimeException(e); } + sorts.add(sort); return this; } /** * Adds a sort builder. 
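Since top_hits sorts are now carried as typed SortBuilder objects rather than pre-serialized XContent bytes, they can be composed directly against the builder API shown above. A minimal sketch, not taken from this patch; the aggregation name "latest" and field "timestamp" are hypothetical, and a single-String name constructor is assumed:

    TopHitsAggregatorBuilder topHits = new TopHitsAggregatorBuilder("latest"); // hypothetical name
    // typed builders go straight into the sorts list, no XContent round-trip
    topHits.sort(SortBuilders.fieldSort("timestamp").order(SortOrder.DESC));
    topHits.sort(SortBuilders.scoreSort());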
*/ - public TopHitsAggregatorBuilder sorts(List sorts) { + public TopHitsAggregatorBuilder sorts(List> sorts) { if (sorts == null) { throw new IllegalArgumentException("[sorts] must not be null: [" + name + "]"); } if (this.sorts == null) { this.sorts = new ArrayList<>(); } - for (BytesReference sort : sorts) { + for (SortBuilder sort : sorts) { this.sorts.add(sort); } return this; @@ -181,7 +173,7 @@ public class TopHitsAggregatorBuilder extends AggregatorBuilder sorts() { + public List> sorts() { return sorts; } @@ -509,10 +501,8 @@ public class TopHitsAggregatorBuilder extends AggregatorBuilder sort : sorts) { + sort.toXContent(builder, params); } builder.endArray(); } @@ -562,9 +552,9 @@ public class TopHitsAggregatorBuilder extends AggregatorBuilder sorts = new ArrayList<>(); + List> sorts = new ArrayList<>(); for (int i = 0; i < size; i++) { - sorts.add(in.readBytesReference()); + sorts.add(in.readSortBuilder()); } factory.sorts = sorts; } @@ -612,8 +602,8 @@ public class TopHitsAggregatorBuilder extends AggregatorBuilder sort : sorts) { + out.writeSortBuilder(sort); } } out.writeBoolean(trackScores); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorFactory.java index c4d2165e308..be10f4ce878 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorFactory.java @@ -19,12 +19,7 @@ package org.elasticsearch.search.aggregations.metrics.tophits; -import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentLocation; -import org.elasticsearch.common.xcontent.XContentParser; +import org.apache.lucene.search.Sort; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.aggregations.Aggregator; @@ -35,27 +30,27 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField; import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext; -import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsFetchSubPhase; import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext.FieldDataField; +import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsFetchSubPhase; import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.highlight.HighlightBuilder; import org.elasticsearch.search.internal.SubSearchContext; -import org.elasticsearch.search.sort.SortParseElement; +import org.elasticsearch.search.sort.SortBuilder; import java.io.IOException; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; public class TopHitsAggregatorFactory extends AggregatorFactory { - private static final SortParseElement sortParseElement = new SortParseElement(); private final int from; private final int size; private final boolean explain; private final boolean version; private final boolean trackScores; - private final List sorts; + private final List> sorts; private 
final HighlightBuilder highlightBuilder; private final List fieldNames; private final List fieldDataFields; @@ -63,7 +58,7 @@ public class TopHitsAggregatorFactory extends AggregatorFactory sorts, HighlightBuilder highlightBuilder, List fieldNames, List fieldDataFields, + List> sorts, HighlightBuilder highlightBuilder, List fieldNames, List fieldDataFields, List scriptFields, FetchSourceContext fetchSourceContext, AggregationContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactories, Map metaData) throws IOException { super(name, type, context, parent, subFactories, metaData); @@ -90,27 +85,9 @@ public class TopHitsAggregatorFactory extends AggregatorFactory optionalSort = SortBuilder.buildSort(sorts, subSearchContext.getQueryShardContext()); + if (optionalSort.isPresent()) { + subSearchContext.sort(optionalSort.get()); } } if (fieldNames != null) { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsParser.java index 03921d620ee..9ec6b46dafe 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsParser.java @@ -19,9 +19,6 @@ package org.elasticsearch.search.aggregations.metrics.tophits; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.script.Script; @@ -30,6 +27,8 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField; import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.highlight.HighlightBuilder; +import org.elasticsearch.search.sort.SortBuilder; + import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -124,9 +123,7 @@ public class TopHitsParser implements Aggregator.Parser { } else if (context.parseFieldMatcher().match(currentFieldName, SearchSourceBuilder.HIGHLIGHT_FIELD)) { factory.highlighter(HighlightBuilder.PROTOTYPE.fromXContent(context)); } else if (context.parseFieldMatcher().match(currentFieldName, SearchSourceBuilder.SORT_FIELD)) { - List sorts = new ArrayList<>(); - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser); - sorts.add(xContentBuilder.bytes()); + List> sorts = SortBuilder.fromXContent(context); factory.sorts(sorts); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].", @@ -157,11 +154,7 @@ public class TopHitsParser implements Aggregator.Parser { } factory.fieldDataFields(fieldDataFields); } else if (context.parseFieldMatcher().match(currentFieldName, SearchSourceBuilder.SORT_FIELD)) { - List sorts = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser); - sorts.add(xContentBuilder.bytes()); - } + List> sorts = SortBuilder.fromXContent(context); factory.sorts(sorts); } else if (context.parseFieldMatcher().match(currentFieldName, SearchSourceBuilder._SOURCE_FIELD)) { 
factory.fetchSource(FetchSourceContext.parse(parser, context)); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregator.java index 1289da661c2..cea99cf868e 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/DerivativePipelineAggregator.java @@ -92,7 +92,7 @@ public class DerivativePipelineAggregator extends PipelineAggregator { for (InternalHistogram.Bucket bucket : buckets) { Long thisBucketKey = resolveBucketKeyAsLong(bucket); Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy); - if (lastBucketValue != null) { + if (lastBucketValue != null && thisBucketValue != null) { double gradient = thisBucketValue - lastBucketValue; double xDiff = -1; if (xAxisUnits != null) { diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 0cb83dbd2f9..c83794ced90 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AggregatorBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -51,10 +52,12 @@ import org.elasticsearch.search.highlight.HighlightBuilder; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.rescore.RescoreBuilder; import org.elasticsearch.search.searchafter.SearchAfterBuilder; +import org.elasticsearch.search.sort.ScoreSortBuilder; import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.search.suggest.SuggestBuilder; +import org.elasticsearch.search.suggest.Suggesters; import java.io.IOException; import java.util.ArrayList; @@ -105,9 +108,10 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ return PROTOTYPE.readFrom(in); } - public static SearchSourceBuilder parseSearchSource(XContentParser parser, QueryParseContext context, AggregatorParsers aggParsers) + public static SearchSourceBuilder parseSearchSource(XContentParser parser, QueryParseContext context, + AggregatorParsers aggParsers, Suggesters suggesters) throws IOException { - return PROTOTYPE.fromXContent(parser, context, aggParsers); + return PROTOTYPE.fromXContent(parser, context, aggParsers, suggesters); } /** @@ -136,7 +140,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ private Boolean version; - private List sorts; + private List> sorts; private boolean trackScores = false; @@ -156,7 +160,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ private HighlightBuilder highlightBuilder; - private BytesReference suggestBuilder; + private SuggestBuilder suggestBuilder; private 
BytesReference innerHitsBuilder; @@ -333,6 +337,9 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ * The sort ordering */ public SearchSourceBuilder sort(String name, SortOrder order) { + if (name.equals(ScoreSortBuilder.NAME)) { + return sort(SortBuilders.scoreSort().order(order)); + } return sort(SortBuilders.fieldSort(name).order(order)); } @@ -343,32 +350,27 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ * The name of the field to sort by */ public SearchSourceBuilder sort(String name) { + if (name.equals(ScoreSortBuilder.NAME)) { + return sort(SortBuilders.scoreSort()); + } return sort(SortBuilders.fieldSort(name)); } /** * Adds a sort builder. */ - public SearchSourceBuilder sort(SortBuilder sort) { - try { + public SearchSourceBuilder sort(SortBuilder sort) { if (sorts == null) { sorts = new ArrayList<>(); } - XContentBuilder builder = XContentFactory.jsonBuilder(); - builder.startObject(); - sort.toXContent(builder, EMPTY_PARAMS); - builder.endObject(); - sorts.add(builder.bytes()); + sorts.add(sort); return this; - } catch (IOException e) { - throw new RuntimeException(e); - } } /** * Gets the bytes representing the sort builders for this request. */ - public List sorts() { + public List> sorts() { return sorts; } @@ -475,20 +477,14 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } public SearchSourceBuilder suggest(SuggestBuilder suggestBuilder) { - try { - XContentBuilder builder = XContentFactory.jsonBuilder(); - suggestBuilder.toXContent(builder, EMPTY_PARAMS); - this.suggestBuilder = builder.bytes(); - return this; - } catch (IOException e) { - throw new RuntimeException(e); - } + this.suggestBuilder = suggestBuilder; + return this; } /** - * Gets the bytes representing the suggester builder for this request. + * Gets the suggester builder for this request. */ - public BytesReference suggest() { + public SuggestBuilder suggest() { return suggestBuilder; } @@ -733,22 +729,86 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ return ext; } + /** + * @return true if the source only has suggest + */ + public boolean isSuggestOnly() { + return suggestBuilder != null + && queryBuilder == null && aggregations == null; + } + + /** + * Rewrites this search source builder into its primitive form, e.g. by + * rewriting the QueryBuilder. If the builder did not change, the identity + * reference must be returned; otherwise the builder will be rewritten + * infinitely.
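The identity contract above is what makes rewriting safe to drive to a fixed point, and it is also why SearchService can call resetNowInMillisUsed() after rewriting: a rewritten source may no longer reference "now", so the request becomes cacheable. A minimal consumer sketch, not the patch's own code; it assumes a QueryShardContext named context and an enclosing method that declares throws IOException:

    SearchSourceBuilder source = request.source();
    SearchSourceBuilder rewritten;
    // keep rewriting until rewrite() hands back the same instance,
    // which by the contract above means nothing changed
    while ((rewritten = source.rewrite(context)) != source) {
        source = rewritten;
    }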
+ */ + public SearchSourceBuilder rewrite(QueryShardContext context) throws IOException { + assert (this.equals(shallowCopy(queryBuilder, postQueryBuilder))); + QueryBuilder queryBuilder = null; + if (this.queryBuilder != null) { + queryBuilder = this.queryBuilder.rewrite(context); + } + QueryBuilder postQueryBuilder = null; + if (this.postQueryBuilder != null) { + postQueryBuilder = this.postQueryBuilder.rewrite(context); + } + boolean rewritten = queryBuilder != this.queryBuilder || postQueryBuilder != this.postQueryBuilder; + if (rewritten) { + return shallowCopy(queryBuilder, postQueryBuilder); + } + return this; + } + + private SearchSourceBuilder shallowCopy(QueryBuilder queryBuilder, QueryBuilder postQueryBuilder) { + SearchSourceBuilder rewrittenBuilder = new SearchSourceBuilder(); + rewrittenBuilder.aggregations = aggregations; + rewrittenBuilder.explain = explain; + rewrittenBuilder.ext = ext; + rewrittenBuilder.fetchSourceContext = fetchSourceContext; + rewrittenBuilder.fieldDataFields = fieldDataFields; + rewrittenBuilder.fieldNames = fieldNames; + rewrittenBuilder.from = from; + rewrittenBuilder.highlightBuilder = highlightBuilder; + rewrittenBuilder.indexBoost = indexBoost; + rewrittenBuilder.innerHitsBuilder = innerHitsBuilder; + rewrittenBuilder.minScore = minScore; + rewrittenBuilder.postQueryBuilder = postQueryBuilder; + rewrittenBuilder.profile = profile; + rewrittenBuilder.queryBuilder = queryBuilder; + rewrittenBuilder.rescoreBuilders = rescoreBuilders; + rewrittenBuilder.scriptFields = scriptFields; + rewrittenBuilder.searchAfterBuilder = searchAfterBuilder; + rewrittenBuilder.size = size; + rewrittenBuilder.sorts = sorts; + rewrittenBuilder.stats = stats; + rewrittenBuilder.suggestBuilder = suggestBuilder; + rewrittenBuilder.terminateAfter = terminateAfter; + rewrittenBuilder.timeoutInMillis = timeoutInMillis; + rewrittenBuilder.trackScores = trackScores; + rewrittenBuilder.version = version; + return rewrittenBuilder; + } + /** * Create a new SearchSourceBuilder with attributes set by an xContent. */ - public SearchSourceBuilder fromXContent(XContentParser parser, QueryParseContext context, AggregatorParsers aggParsers) + public SearchSourceBuilder fromXContent(XContentParser parser, QueryParseContext context, + AggregatorParsers aggParsers, Suggesters suggesters) throws IOException { SearchSourceBuilder builder = new SearchSourceBuilder(); - builder.parseXContent(parser, context, aggParsers); + builder.parseXContent(parser, context, aggParsers, suggesters); return builder; } /** * Parse some xContent into this SearchSourceBuilder, overwriting any values specified in the xContent. Use this if you need to set up * different defaults than a regular SearchSourceBuilder would have and use - * {@link #fromXContent(XContentParser, QueryParseContext, AggregatorParsers)} if you have normal defaults. + * {@link #fromXContent(XContentParser, QueryParseContext, AggregatorParsers, Suggesters)} if you have normal defaults. 
*/ - public void parseXContent(XContentParser parser, QueryParseContext context, AggregatorParsers aggParsers) throws IOException { + public void parseXContent(XContentParser parser, QueryParseContext context, AggregatorParsers aggParsers, Suggesters suggesters) + throws IOException { + XContentParser.Token token = parser.currentToken(); String currentFieldName = null; if (token != XContentParser.Token.START_OBJECT && (token = parser.nextToken()) != XContentParser.Token.START_OBJECT) { @@ -852,12 +912,9 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser); innerHitsBuilder = xContentBuilder.bytes(); } else if (context.parseFieldMatcher().match(currentFieldName, SUGGEST_FIELD)) { - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser); - suggestBuilder = xContentBuilder.bytes(); + suggestBuilder = SuggestBuilder.fromXContent(context, suggesters); } else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) { - sorts = new ArrayList<>(); - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser); - sorts.add(xContentBuilder.bytes()); + sorts = new ArrayList<>(SortBuilder.fromXContent(context)); } else if (context.parseFieldMatcher().match(currentFieldName, EXT_FIELD)) { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser); ext = xContentBuilder.bytes(); @@ -888,11 +945,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } } } else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) { - sorts = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser); - sorts.add(xContentBuilder.bytes()); - } + sorts = new ArrayList<>(SortBuilder.fromXContent(context)); } else if (context.parseFieldMatcher().match(currentFieldName, RESCORE_FIELD)) { rescoreBuilders = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { @@ -1005,10 +1058,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ if (sorts != null) { builder.startArray(SORT_FIELD.getPreferredName()); - for (BytesReference sort : sorts) { - XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(sort); - parser.nextToken(); - builder.copyCurrentStructure(parser); + for (SortBuilder sort : sorts) { + sort.toXContent(builder, params); } builder.endArray(); } @@ -1050,10 +1101,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } if (suggestBuilder != null) { - builder.field(SUGGEST_FIELD.getPreferredName()); - XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(suggestBuilder); - parser.nextToken(); - builder.copyCurrentStructure(parser); + builder.field(SUGGEST_FIELD.getPreferredName(), suggestBuilder); } if (rescoreBuilders != null) { @@ -1217,9 +1265,9 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ builder.size = in.readVInt(); if (in.readBoolean()) { int size = in.readVInt(); - List sorts = new ArrayList<>(); + List> sorts = new ArrayList<>(); for (int i = 0; i < size; i++) { - sorts.add(in.readBytesReference()); + sorts.add(in.readSortBuilder()); } builder.sorts = sorts; } @@ -1232,7 +1280,7 @@ public final class 
SearchSourceBuilder extends ToXContentToBytes implements Writ builder.stats = stats; } if (in.readBoolean()) { - builder.suggestBuilder = in.readBytesReference(); + builder.suggestBuilder = SuggestBuilder.PROTOTYPE.readFrom(in); } builder.terminateAfter = in.readVInt(); builder.timeoutInMillis = in.readLong(); @@ -1333,8 +1381,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ out.writeBoolean(hasSorts); if (hasSorts) { out.writeVInt(sorts.size()); - for (BytesReference sort : sorts) { - out.writeBytesReference(sort); + for (SortBuilder sort : sorts) { + out.writeSortBuilder(sort); } } boolean hasStats = stats != null; @@ -1348,7 +1396,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ boolean hasSuggestBuilder = suggestBuilder != null; out.writeBoolean(hasSuggestBuilder); if (hasSuggestBuilder) { - out.writeBytesReference(suggestBuilder); + suggestBuilder.writeTo(out); } out.writeVInt(terminateAfter); out.writeLong(timeoutInMillis); diff --git a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java index ad8e71f5b93..d5d4607fba9 100644 --- a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java +++ b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java @@ -76,7 +76,7 @@ public class SearchPhaseController extends AbstractComponent { public int compare(AtomicArray.Entry o1, AtomicArray.Entry o2) { int i = o1.value.shardTarget().index().compareTo(o2.value.shardTarget().index()); if (i == 0) { - i = o1.value.shardTarget().shardId() - o2.value.shardTarget().shardId(); + i = o1.value.shardTarget().shardId().id() - o2.value.shardTarget().shardId().id(); } return i; } @@ -386,7 +386,7 @@ public class SearchPhaseController extends AbstractComponent { Suggest.group(groupedSuggestions, shardResult); } - suggest = hasSuggestions ? new Suggest(Suggest.Fields.SUGGEST, Suggest.reduce(groupedSuggestions)) : null; + suggest = hasSuggestions ? 
new Suggest(Suggest.reduce(groupedSuggestions)) : null; } // merge addAggregation diff --git a/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java b/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java index fb0fc75299f..dbaee5b64bb 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java @@ -26,7 +26,6 @@ import org.elasticsearch.transport.TransportResponse; import java.io.IOException; -import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget; import static org.elasticsearch.search.fetch.QueryFetchSearchResult.readQueryFetchSearchResult; /** @@ -56,7 +55,7 @@ public class ScrollQueryFetchSearchResult extends TransportResponse { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardTarget = readSearchShardTarget(in); + shardTarget = new SearchShardTarget(in); result = readQueryFetchSearchResult(in); result.shardTarget(shardTarget); } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsFetchSubPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsFetchSubPhase.java index 39c432f9cc9..f9cf3f09f39 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsFetchSubPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsFetchSubPhase.java @@ -35,7 +35,6 @@ import org.elasticsearch.search.highlight.HighlighterParseElement; import org.elasticsearch.search.internal.InternalSearchHit; import org.elasticsearch.search.internal.InternalSearchHits; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.sort.SortParseElement; import java.io.IOException; import java.util.HashMap; @@ -51,8 +50,8 @@ public class InnerHitsFetchSubPhase implements FetchSubPhase { private FetchPhase fetchPhase; @Inject - public InnerHitsFetchSubPhase(SortParseElement sortParseElement, FetchSourceParseElement sourceParseElement, HighlighterParseElement highlighterParseElement, FieldDataFieldsParseElement fieldDataFieldsParseElement, ScriptFieldsParseElement scriptFieldsParseElement) { - parseElements = singletonMap("inner_hits", new InnerHitsParseElement(sortParseElement, sourceParseElement, highlighterParseElement, + public InnerHitsFetchSubPhase(FetchSourceParseElement sourceParseElement, HighlighterParseElement highlighterParseElement, FieldDataFieldsParseElement fieldDataFieldsParseElement, ScriptFieldsParseElement scriptFieldsParseElement) { + parseElements = singletonMap("inner_hits", new InnerHitsParseElement(sourceParseElement, highlighterParseElement, fieldDataFieldsParseElement, scriptFieldsParseElement)); } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsParseElement.java b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsParseElement.java index 077268ac52f..fc1b2bf399f 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsParseElement.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsParseElement.java @@ -32,7 +32,6 @@ import org.elasticsearch.search.fetch.source.FetchSourceParseElement; import org.elasticsearch.search.highlight.HighlighterParseElement; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.SubSearchContext; -import 
org.elasticsearch.search.sort.SortParseElement; import java.util.HashMap; import java.util.Map; @@ -43,14 +42,12 @@ import static org.elasticsearch.index.query.support.InnerHitsQueryParserHelper.p */ public class InnerHitsParseElement implements SearchParseElement { - private final SortParseElement sortParseElement; private final FetchSourceParseElement sourceParseElement; private final HighlighterParseElement highlighterParseElement; private final FieldDataFieldsParseElement fieldDataFieldsParseElement; private final ScriptFieldsParseElement scriptFieldsParseElement; - public InnerHitsParseElement(SortParseElement sortParseElement, FetchSourceParseElement sourceParseElement, HighlighterParseElement highlighterParseElement, FieldDataFieldsParseElement fieldDataFieldsParseElement, ScriptFieldsParseElement scriptFieldsParseElement) { - this.sortParseElement = sortParseElement; + public InnerHitsParseElement(FetchSourceParseElement sourceParseElement, HighlighterParseElement highlighterParseElement, FieldDataFieldsParseElement fieldDataFieldsParseElement, ScriptFieldsParseElement scriptFieldsParseElement) { this.sourceParseElement = sourceParseElement; this.highlighterParseElement = highlighterParseElement; this.fieldDataFieldsParseElement = fieldDataFieldsParseElement; @@ -184,10 +181,10 @@ public class InnerHitsParseElement implements SearchParseElement { } else if ("inner_hits".equals(fieldName)) { childInnerHits = parseInnerHits(parser, context, searchContext); } else { - parseCommonInnerHitOptions(parser, token, fieldName, subSearchContext, sortParseElement, sourceParseElement, highlighterParseElement, scriptFieldsParseElement, fieldDataFieldsParseElement); + parseCommonInnerHitOptions(parser, token, fieldName, subSearchContext, sourceParseElement, highlighterParseElement, scriptFieldsParseElement, fieldDataFieldsParseElement); } } else { - parseCommonInnerHitOptions(parser, token, fieldName, subSearchContext, sortParseElement, sourceParseElement, highlighterParseElement, scriptFieldsParseElement, fieldDataFieldsParseElement); + parseCommonInnerHitOptions(parser, token, fieldName, subSearchContext, sourceParseElement, highlighterParseElement, scriptFieldsParseElement, fieldDataFieldsParseElement); } } diff --git a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java index 8c3c19343b4..f3fe48f6682 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java @@ -49,6 +49,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; +import org.elasticsearch.index.percolator.PercolatorQueryCache; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardContext; @@ -190,6 +191,9 @@ public class DefaultSearchContext extends SearchContext { */ @Override public void preProcess() { + if (hasOnlySuggest() ) { + return; + } if (scrollContext == null) { long from = from() == -1 ? 0 : from(); long size = size() == -1 ? 
10 : size(); @@ -487,6 +491,11 @@ public class DefaultSearchContext extends SearchContext { return indexService.fieldData(); } + @Override + public PercolatorQueryCache percolatorQueryCache() { + return indexService.cache().getPercolatorQueryCache(); + } + @Override public long timeoutInMillis() { return timeoutInMillis; diff --git a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index 83ea2b1ccd8..fedab3f9782 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -34,6 +34,7 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.object.ObjectMapper; +import org.elasticsearch.index.percolator.PercolatorQueryCache; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexShard; @@ -274,6 +275,11 @@ public abstract class FilteredSearchContext extends SearchContext { return in.fieldData(); } + @Override + public PercolatorQueryCache percolatorQueryCache() { + return in.percolatorQueryCache(); + } + @Override public long timeoutInMillis() { return in.timeoutInMillis(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java index c6afe325bb3..dcbcce503a4 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java @@ -55,7 +55,6 @@ import static java.util.Collections.singletonMap; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.common.lucene.Lucene.readExplanation; import static org.elasticsearch.common.lucene.Lucene.writeExplanation; -import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget; import static org.elasticsearch.search.highlight.HighlightField.readHighlightField; import static org.elasticsearch.search.internal.InternalSearchHitField.readSearchHitField; @@ -638,7 +637,7 @@ public class InternalSearchHit implements SearchHit { if (context.streamShardTarget() == ShardTargetType.STREAM) { if (in.readBoolean()) { - shard = readSearchShardTarget(in); + shard = new SearchShardTarget(in); } } else if (context.streamShardTarget() == ShardTargetType.LOOKUP) { int lookupId = in.readVInt(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java index 9e787cf2aa9..09d11e1a1a3 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHits.java @@ -34,7 +34,6 @@ import java.util.IdentityHashMap; import java.util.Iterator; import java.util.Map; -import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget; import static org.elasticsearch.search.internal.InternalSearchHit.readSearchHit; /** @@ -216,7 +215,7 @@ public class InternalSearchHits implements SearchHits { // read the lookup table first int lookupSize = in.readVInt(); for (int i = 0; i < lookupSize; i++) { - 
context.handleShardLookup().put(in.readVInt(), readSearchShardTarget(in)); + context.handleShardLookup().put(in.readVInt(), new SearchShardTarget(in)); } } @@ -262,4 +261,4 @@ public class InternalSearchHits implements SearchHits { } } } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java index b8255e0bb52..1a2e1f70191 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java @@ -134,7 +134,7 @@ public class InternalSearchResponse implements Streamable, ToXContent { aggregations = InternalAggregations.readAggregations(in); } if (in.readBoolean()) { - suggest = Suggest.readSuggest(Suggest.Fields.SUGGEST, in); + suggest = Suggest.readSuggest(in); } timedOut = in.readBoolean(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java index 2b35e182161..f9d0b6b9283 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -38,6 +38,7 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.object.ObjectMapper; +import org.elasticsearch.index.percolator.PercolatorQueryCache; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexShard; @@ -148,6 +149,10 @@ public abstract class SearchContext implements Releasable { return nowInMillisUsed; } + public final void resetNowInMillisUsed() { + this.nowInMillisUsed = false; + } + protected abstract long nowInMillisImpl(); public abstract ScrollContext scrollContext(); @@ -217,6 +222,8 @@ public abstract class SearchContext implements Releasable { public abstract IndexFieldDataService fieldData(); + public abstract PercolatorQueryCache percolatorQueryCache(); + public abstract long timeoutInMillis(); public abstract void timeoutInMillis(long timeoutInMillis); @@ -346,6 +353,14 @@ public abstract class SearchContext implements Releasable { } } + /** + * @return true if the request contains only suggest + */ + public final boolean hasOnlySuggest() { + return request().source() != null + && request().source().isSuggestOnly(); + } + /** * Looks up the given field, but does not restrict to fields in the types set on this context. 
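The hasOnlySuggest()/isSuggestOnly() pair above gates a suggest-only fast path: when a source carries nothing but a suggester, the query phase can skip query execution entirely (see the QueryPhase change further down). A minimal illustration of what qualifies, assuming SuggestBuilder's no-arg constructor:

    SearchSourceBuilder source = new SearchSourceBuilder();
    source.suggest(new SuggestBuilder());
    // suggest is set, and no query or aggregations were added
    assert source.isSuggestOnly();

    source.query(QueryBuilders.matchAllQuery());
    // adding a query disqualifies the fast path
    assert source.isSuggestOnly() == false;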
*/ diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index 0f46461f4a2..31192350308 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.Template; import org.elasticsearch.search.Scroll; @@ -58,8 +59,7 @@ import static org.elasticsearch.search.Scroll.readScroll; public class ShardSearchLocalRequest implements ShardSearchRequest { - private String index; - private int shardId; + private ShardId shardId; private int numberOfShards; private SearchType searchType; private Scroll scroll; @@ -97,8 +97,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType, SearchSourceBuilder source, String[] types, Boolean requestCache) { - this.index = shardId.getIndexName(); - this.shardId = shardId.id(); + this.shardId = shardId; this.numberOfShards = numberOfShards; this.searchType = searchType; this.source = source; @@ -106,13 +105,9 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { this.requestCache = requestCache; } - @Override - public String index() { - return index; - } @Override - public int shardId() { + public ShardId shardId() { return shardId; } @@ -177,8 +172,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { @SuppressWarnings("unchecked") protected void innerReadFrom(StreamInput in) throws IOException { - index = in.readString(); - shardId = in.readVInt(); + shardId = ShardId.readShardId(in); searchType = SearchType.fromId(in.readByte()); numberOfShards = in.readVInt(); if (in.readBoolean()) { @@ -195,8 +189,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { } protected void innerWriteTo(StreamOutput out, boolean asKey) throws IOException { - out.writeString(index); - out.writeVInt(shardId); + shardId.writeTo(out); out.writeByte(searchType.id()); if (!asKey) { out.writeVInt(numberOfShards); @@ -232,4 +225,15 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { // we could potentially keep it without copying, but then pay the price of extra unused bytes up to a page return out.bytes().copyBytesArray(); } + + @Override + public void rewrite(QueryShardContext context) throws IOException { + SearchSourceBuilder source = this.source; + SearchSourceBuilder rewritten = null; + while (rewritten != source) { + rewritten = source.rewrite(context); + source = rewritten; + } + this.source = source; + } } diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index 1f0b3d1f188..aa148e215c8 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -21,6 +21,8 @@ package org.elasticsearch.search.internal; import org.elasticsearch.action.search.SearchType; import 
org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.Template; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -34,9 +36,7 @@ import java.io.IOException; */ public interface ShardSearchRequest { - String index(); - - int shardId(); + ShardId shardId(); String[] types(); @@ -73,4 +73,10 @@ public interface ShardSearchRequest { * Returns the cache key for this shard search request, based on its content */ BytesReference cacheKey() throws IOException; + + /** + * Rewrites this request into its primitive form. e.g. by rewriting the + * QueryBuilder. + */ + void rewrite(QueryShardContext context) throws IOException; } diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java index 48ea31c170a..cd6460a686f 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java @@ -28,6 +28,8 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.Template; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -71,13 +73,9 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha return originalIndices.indicesOptions(); } - @Override - public String index() { - return shardSearchLocalRequest.index(); - } @Override - public int shardId() { + public ShardId shardId() { return shardSearchLocalRequest.shardId(); } @@ -159,4 +157,16 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha public boolean isProfile() { return shardSearchLocalRequest.isProfile(); } + + @Override + public void rewrite(QueryShardContext context) throws IOException { + shardSearchLocalRequest.rewrite(context); + } + + private ShardSearchTransportRequest shallowCopy(ShardSearchLocalRequest rewritten) { + ShardSearchTransportRequest newRequest = new ShardSearchTransportRequest(); + newRequest.originalIndices = originalIndices; + newRequest.shardSearchLocalRequest = rewritten; + return newRequest; + } } diff --git a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java index 5a98744505a..62210655a00 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -58,7 +58,6 @@ import org.elasticsearch.search.profile.ProfileShardResult; import org.elasticsearch.search.profile.Profiler; import org.elasticsearch.search.rescore.RescorePhase; import org.elasticsearch.search.rescore.RescoreSearchContext; -import org.elasticsearch.search.sort.SortParseElement; import org.elasticsearch.search.sort.TrackScoresParseElement; import org.elasticsearch.search.suggest.SuggestPhase; @@ -98,7 +97,6 @@ public class QueryPhase implements SearchPhase { parseElements.put("query", new QueryParseElement()); parseElements.put("post_filter", new 
PostFilterParseElement()); parseElements.put("postFilter", new PostFilterParseElement()); - parseElements.put("sort", new SortParseElement()); parseElements.put("trackScores", new TrackScoresParseElement()); parseElements.put("track_scores", new TrackScoresParseElement()); parseElements.put("min_score", new MinScoreParseElement()); @@ -118,6 +116,12 @@ public class QueryPhase implements SearchPhase { @Override public void execute(SearchContext searchContext) throws QueryPhaseExecutionException { + if (searchContext.hasOnlySuggest()) { + suggestPhase.execute(searchContext); + // TODO: fix this once we can fetch docs for suggestions + searchContext.queryResult().topDocs(new TopDocs(0, Lucene.EMPTY_SCORE_DOCS, 0)); + return; + } // Pre-process aggregations as late as possible. In the case of a DFS_Q_T_F // request, preProcess is called on the DFS phase phase, this is why we pre-process them // here to make sure it happens during the QUERY phase diff --git a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java index 9223eb5a82d..2b82633ebfd 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java @@ -207,7 +207,7 @@ public class QuerySearchResult extends QuerySearchResultProvider { this.pipelineAggregators = pipelineAggregators; } if (in.readBoolean()) { - suggest = Suggest.readSuggest(Suggest.Fields.SUGGEST, in); + suggest = Suggest.readSuggest(in); } searchTimedOut = in.readBoolean(); terminatedEarly = in.readOptionalBoolean(); diff --git a/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java b/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java index ebb7615da44..bcdd94adf89 100644 --- a/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java @@ -26,7 +26,6 @@ import org.elasticsearch.transport.TransportResponse; import java.io.IOException; -import static org.elasticsearch.search.SearchShardTarget.readSearchShardTarget; import static org.elasticsearch.search.query.QuerySearchResult.readQuerySearchResult; /** @@ -56,7 +55,7 @@ public class ScrollQuerySearchResult extends TransportResponse { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardTarget = readSearchShardTarget(in); + shardTarget = new SearchShardTarget(in); queryResult = readQuerySearchResult(in); queryResult.shardTarget(shardTarget); } diff --git a/core/src/main/java/org/elasticsearch/search/rescore/QueryRescoreMode.java b/core/src/main/java/org/elasticsearch/search/rescore/QueryRescoreMode.java index b0d5a325e5a..959bd51270b 100644 --- a/core/src/main/java/org/elasticsearch/search/rescore/QueryRescoreMode.java +++ b/core/src/main/java/org/elasticsearch/search/rescore/QueryRescoreMode.java @@ -85,10 +85,7 @@ public enum QueryRescoreMode implements Writeable { public abstract float combine(float primary, float secondary); - static QueryRescoreMode PROTOTYPE = Total; - - @Override - public QueryRescoreMode readFrom(StreamInput in) throws IOException { + public static QueryRescoreMode readFromStream(StreamInput in) throws IOException { int ordinal = in.readVInt(); if (ordinal < 0 || ordinal >= values().length) { throw new IOException("Unknown ScoreMode ordinal [" + ordinal + "]"); diff --git 
a/core/src/main/java/org/elasticsearch/search/rescore/QueryRescorerBuilder.java b/core/src/main/java/org/elasticsearch/search/rescore/QueryRescorerBuilder.java index c65fca79a97..85564265579 100644 --- a/core/src/main/java/org/elasticsearch/search/rescore/QueryRescorerBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/rescore/QueryRescorerBuilder.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryShardContext; @@ -39,8 +38,6 @@ public class QueryRescorerBuilder extends RescoreBuilder { public static final String NAME = "query"; - public static final QueryRescorerBuilder PROTOTYPE = new QueryRescorerBuilder(new MatchAllQueryBuilder()); - public static final float DEFAULT_RESCORE_QUERYWEIGHT = 1.0f; public static final float DEFAULT_QUERYWEIGHT = 1.0f; public static final QueryRescoreMode DEFAULT_SCORE_MODE = QueryRescoreMode.Total; @@ -77,6 +74,25 @@ public class QueryRescorerBuilder extends RescoreBuilder { this.queryBuilder = builder; } + /** + * Read from a stream. + */ + public QueryRescorerBuilder(StreamInput in) throws IOException { + super(in); + queryBuilder = in.readQuery(); + scoreMode = QueryRescoreMode.readFromStream(in); + rescoreQueryWeight = in.readFloat(); + queryWeight = in.readFloat(); + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + out.writeQuery(queryBuilder); + scoreMode.writeTo(out); + out.writeFloat(rescoreQueryWeight); + out.writeFloat(queryWeight); + } + /** * @return the query used for this rescore query */ @@ -140,9 +156,9 @@ public class QueryRescorerBuilder extends RescoreBuilder { builder.endObject(); } - public QueryRescorerBuilder fromXContent(QueryParseContext parseContext) throws IOException { - InnerBuilder innerBuilder = QUERY_RESCORE_PARSER.parse(parseContext.parser(), new InnerBuilder(), parseContext); - return innerBuilder.build(); + public static QueryRescorerBuilder fromXContent(QueryParseContext parseContext) throws IOException { + InnerBuilder innerBuilder = QUERY_RESCORE_PARSER.parse(parseContext.parser(), new InnerBuilder(), parseContext); + return innerBuilder.build(); } @Override @@ -181,23 +197,6 @@ public class QueryRescorerBuilder extends RescoreBuilder { Objects.equals(queryBuilder, other.queryBuilder); } - @Override - public QueryRescorerBuilder doReadFrom(StreamInput in) throws IOException { - QueryRescorerBuilder rescorer = new QueryRescorerBuilder(in.readQuery()); - rescorer.setScoreMode(QueryRescoreMode.PROTOTYPE.readFrom(in)); - rescorer.setRescoreQueryWeight(in.readFloat()); - rescorer.setQueryWeight(in.readFloat()); - return rescorer; - } - - @Override - public void doWriteTo(StreamOutput out) throws IOException { - out.writeQuery(queryBuilder); - scoreMode.writeTo(out); - out.writeFloat(rescoreQueryWeight); - out.writeFloat(queryWeight); - } - @Override public String getWriteableName() { return NAME; @@ -208,7 +207,7 @@ public class QueryRescorerBuilder extends RescoreBuilder { * for the constructor of {@link QueryRescorerBuilder}, but {@link ObjectParser} only * allows filling properties of an already constructed value. 
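With the PROTOTYPE constant and readFrom gone, rescorers are now constructed directly and deserialized through the stream constructor, which reads the shared windowSize via super(in) before the subclass fields; the base writeTo (in the RescoreBuilder change below) writes windowSize first to keep the wire order symmetric. A minimal usage sketch; the field name and query text are hypothetical:

    QueryRescorerBuilder rescorer =
            new QueryRescorerBuilder(QueryBuilders.matchPhraseQuery("title", "quick brown fox"));
    rescorer.setScoreMode(QueryRescoreMode.Multiply); // how original and rescore scores combine
    rescorer.setRescoreQueryWeight(0.7f);
    rescorer.setQueryWeight(1.0f);
    rescorer.windowSize(50); // rescore only the top 50 hits per shard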
*/ - private class InnerBuilder { + private static class InnerBuilder { private QueryBuilder queryBuilder; private float rescoreQueryWeight = DEFAULT_RESCORE_QUERYWEIGHT; diff --git a/core/src/main/java/org/elasticsearch/search/rescore/RescoreBuilder.java b/core/src/main/java/org/elasticsearch/search/rescore/RescoreBuilder.java index 8dad07a5430..32885380866 100644 --- a/core/src/main/java/org/elasticsearch/search/rescore/RescoreBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/rescore/RescoreBuilder.java @@ -46,6 +46,27 @@ public abstract class RescoreBuilder> implements T private static ParseField WINDOW_SIZE_FIELD = new ParseField("window_size"); + /** + * Construct an empty RescoreBuilder. + */ + public RescoreBuilder() { + } + + /** + * Read from a stream. + */ + protected RescoreBuilder(StreamInput in) throws IOException { + windowSize = in.readOptionalVInt(); + } + + @Override + public final void writeTo(StreamOutput out) throws IOException { + out.writeOptionalVInt(this.windowSize); + doWriteTo(out); + } + + protected abstract void doWriteTo(StreamOutput out) throws IOException; + @SuppressWarnings("unchecked") public RB windowSize(int windowSize) { this.windowSize = windowSize; @@ -74,7 +95,7 @@ public abstract class RescoreBuilder> implements T } else if (token == XContentParser.Token.START_OBJECT) { // we only have QueryRescorer at this point if (QueryRescorerBuilder.NAME.equals(fieldName)) { - rescorer = QueryRescorerBuilder.PROTOTYPE.fromXContent(parseContext); + rescorer = QueryRescorerBuilder.fromXContent(parseContext); } else { throw new ParsingException(parser.getTokenLocation(), "rescore doesn't support rescorer with name [" + fieldName + "]"); } @@ -128,23 +149,6 @@ public abstract class RescoreBuilder> implements T return Objects.equals(windowSize, other.windowSize); } - @Override - public RB readFrom(StreamInput in) throws IOException { - RB builder = doReadFrom(in); - builder.windowSize = in.readOptionalVInt(); - return builder; - } - - protected abstract RB doReadFrom(StreamInput in) throws IOException; - - @Override - public void writeTo(StreamOutput out) throws IOException { - doWriteTo(out); - out.writeOptionalVInt(this.windowSize); - } - - protected abstract void doWriteTo(StreamOutput out) throws IOException; - @Override public final String toString() { try { diff --git a/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java index 4f082b057da..1798c75c6a2 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java @@ -19,34 +19,75 @@ package org.elasticsearch.search.sort; +import org.apache.lucene.search.SortField; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; +import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.query.QueryShardException; +import 
org.elasticsearch.search.MultiValueMode; import java.io.IOException; +import java.util.Objects; /** * A sort builder to sort based on a document field. */ -public class FieldSortBuilder extends SortBuilder { +public class FieldSortBuilder extends SortBuilder { + public static final String NAME = "field_sort"; + public static final ParseField NESTED_PATH = new ParseField("nested_path"); + public static final ParseField NESTED_FILTER = new ParseField("nested_filter"); + public static final ParseField MISSING = new ParseField("missing"); + public static final ParseField ORDER = new ParseField("order"); + public static final ParseField REVERSE = new ParseField("reverse"); + public static final ParseField SORT_MODE = new ParseField("mode"); + public static final ParseField UNMAPPED_TYPE = new ParseField("unmapped_type"); + + /** + * special field name to sort by index order + */ + public static final String DOC_FIELD_NAME = "_doc"; + private static final SortField SORT_DOC = new SortField(null, SortField.Type.DOC); + private static final SortField SORT_DOC_REVERSE = new SortField(null, SortField.Type.DOC, true); private final String fieldName; - private SortOrder order; - private Object missing; private String unmappedType; - private String sortMode; + private SortMode sortMode; - private QueryBuilder nestedFilter; + private QueryBuilder nestedFilter; private String nestedPath; + /** Copy constructor. */ + public FieldSortBuilder(FieldSortBuilder template) { + this(template.fieldName); + this.order(template.order()); + this.missing(template.missing()); + this.unmappedType(template.unmappedType()); + if (template.sortMode != null) { + this.sortMode(template.sortMode()); + } + this.setNestedFilter(template.getNestedFilter()); + this.setNestedPath(template.getNestedPath()); + } + /** * Constructs a new sort based on a document field. * - * @param fieldName The field name. + * @param fieldName + * The field name. */ public FieldSortBuilder(String fieldName) { if (fieldName == null) { @@ -56,30 +97,55 @@ public class FieldSortBuilder extends SortBuilder { } /** - * The order of sorting. Defaults to {@link SortOrder#ASC}. + * Read from a stream. */ + public FieldSortBuilder(StreamInput in) throws IOException { + fieldName = in.readString(); + nestedFilter = in.readOptionalQuery(); + nestedPath = in.readOptionalString(); + missing = in.readGenericValue(); + order = in.readOptionalWriteable(SortOrder::readFromStream); + sortMode = in.readOptionalWriteable(SortMode::readFromStream); + unmappedType = in.readOptionalString(); + } + @Override - public FieldSortBuilder order(SortOrder order) { - this.order = order; - return this; + public void writeTo(StreamOutput out) throws IOException { + out.writeString(fieldName); + out.writeOptionalQuery(nestedFilter); + out.writeOptionalString(nestedPath); + out.writeGenericValue(missing); + out.writeOptionalWriteable(order); + out.writeOptionalWriteable(sortMode); + out.writeOptionalString(unmappedType); + } + + /** Returns the document field this sort should be based on. */ + public String getFieldName() { + return this.fieldName; } /** * Sets the value when a field is missing in a doc. Can also be set to _last or * _first to sort missing last or first respectively. */ - @Override public FieldSortBuilder missing(Object missing) { this.missing = missing; return this; } + /** Returns the value used when a field is missing in a doc. */ + public Object missing() { + return missing; + } + /** * Set the type to use in case the current field is not mapped in an index. 
- * Specifying a type tells Elasticsearch what type the sort values should have, which is important - * for cross-index search, if there are sort fields that exist on some indices only. - * If the unmapped type is null then query execution will fail if one or more indices - * don't have a mapping for the current field. + * Specifying a type tells Elasticsearch what type the sort values should + * have, which is important for cross-index search, if there are sort fields + * that exist on some indices only. If the unmapped type is null + * then query execution will fail if one or more indices don't have a + * mapping for the current field. */ public FieldSortBuilder unmappedType(String type) { this.unmappedType = type; @@ -87,57 +153,246 @@ public class FieldSortBuilder extends SortBuilder { } /** - * Defines what values to pick in the case a document contains multiple values for the targeted sort field. - * Possible values: min, max, sum and avg + * Returns the type to use in case the current field is not mapped in an + * index. + */ + public String unmappedType() { + return this.unmappedType; + } + + /** + * Defines what values to pick in the case a document contains multiple + * values for the targeted sort field. Possible values: min, max, sum and + * avg + * *

    * The last two values are only applicable for number based fields. */ - public FieldSortBuilder sortMode(String sortMode) { + public FieldSortBuilder sortMode(SortMode sortMode) { + Objects.requireNonNull(sortMode, "sort mode cannot be null"); this.sortMode = sortMode; return this; } /** - * Sets the nested filter that the nested objects should match with in order to be taken into account - * for sorting. + * Returns what values to pick in the case a document contains multiple + * values for the targeted sort field. */ - public FieldSortBuilder setNestedFilter(QueryBuilder nestedFilter) { + public SortMode sortMode() { + return this.sortMode; + } + + /** + * Sets the nested filter that the nested objects should match with in order + * to be taken into account for sorting. + * + * TODO should the above getters and setters be deprecated or changed in + * favour of real getters and setters? + */ + public FieldSortBuilder setNestedFilter(QueryBuilder nestedFilter) { this.nestedFilter = nestedFilter; return this; } + /** + * Returns the nested filter that the nested objects should match with in + * order to be taken into account for sorting. + */ + public QueryBuilder getNestedFilter() { + return this.nestedFilter; + } /** - * Sets the nested path if sorting occurs on a field that is inside a nested object. By default when sorting on a - * field inside a nested object, the nearest upper nested object is selected as nested path. + * Sets the nested path if sorting occurs on a field that is inside a nested + * object. By default when sorting on a field inside a nested object, the + * nearest upper nested object is selected as nested path. */ public FieldSortBuilder setNestedPath(String nestedPath) { this.nestedPath = nestedPath; return this; } + /** + * Returns the nested path if sorting occurs on a field that is inside a + * nested object. 
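+     * (Illustrative, field names hypothetical: after
+     * {@code new FieldSortBuilder("offers.price").setNestedPath("offers").sortMode(SortMode.MIN)}
+     * this getter returns "offers".)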
+ */ + public String getNestedPath() { + return this.nestedPath; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); builder.startObject(fieldName); - if (order != null) { - builder.field("order", order.toString()); - } + builder.field(ORDER_FIELD.getPreferredName(), order); if (missing != null) { - builder.field("missing", missing); + builder.field(MISSING.getPreferredName(), missing); } if (unmappedType != null) { - builder.field(SortParseElement.UNMAPPED_TYPE.getPreferredName(), unmappedType); + builder.field(UNMAPPED_TYPE.getPreferredName(), unmappedType); } if (sortMode != null) { - builder.field("mode", sortMode); + builder.field(SORT_MODE.getPreferredName(), sortMode); } if (nestedFilter != null) { - builder.field("nested_filter", nestedFilter, params); + builder.field(NESTED_FILTER.getPreferredName(), nestedFilter, params); } if (nestedPath != null) { - builder.field("nested_path", nestedPath); + builder.field(NESTED_PATH.getPreferredName(), nestedPath); } builder.endObject(); + builder.endObject(); + return builder; + } + + @Override + public SortField build(QueryShardContext context) throws IOException { + if (DOC_FIELD_NAME.equals(fieldName)) { + if (order == SortOrder.DESC) { + return SORT_DOC_REVERSE; + } else { + return SORT_DOC; + } + } else { + MappedFieldType fieldType = context.fieldMapper(fieldName); + if (fieldType == null) { + if (unmappedType != null) { + fieldType = context.getMapperService().unmappedFieldType(unmappedType); + } else { + throw new QueryShardException(context, "No mapping found for [" + fieldName + "] in order to sort on"); + } + } + + if (!fieldType.isSortable()) { + throw new QueryShardException(context, "Sorting not supported for field[" + fieldName + "]"); + } + + MultiValueMode localSortMode = null; + if (sortMode != null) { + localSortMode = MultiValueMode.fromString(sortMode.toString()); + } + + if (fieldType.isNumeric() == false && (sortMode == SortMode.SUM || sortMode == SortMode.AVG || sortMode == SortMode.MEDIAN)) { + throw new QueryShardException(context, "we only support AVG, MEDIAN and SUM on number based fields"); + } + + boolean reverse = (order == SortOrder.DESC); + if (localSortMode == null) { + localSortMode = reverse ? 
MultiValueMode.MAX : MultiValueMode.MIN; + } + + final Nested nested = resolveNested(context, nestedPath, nestedFilter); + IndexFieldData.XFieldComparatorSource fieldComparatorSource = context.getForField(fieldType) + .comparatorSource(missing, localSortMode, nested); + return new SortField(fieldType.name(), fieldComparatorSource, reverse); + } + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + FieldSortBuilder builder = (FieldSortBuilder) other; + return (Objects.equals(this.fieldName, builder.fieldName) && Objects.equals(this.nestedFilter, builder.nestedFilter) + && Objects.equals(this.nestedPath, builder.nestedPath) && Objects.equals(this.missing, builder.missing) + && Objects.equals(this.order, builder.order) && Objects.equals(this.sortMode, builder.sortMode) + && Objects.equals(this.unmappedType, builder.unmappedType)); + } + + @Override + public int hashCode() { + return Objects.hash(this.fieldName, this.nestedFilter, this.nestedPath, this.missing, this.order, this.sortMode, this.unmappedType); + } + + @Override + public String getWriteableName() { + return NAME; + } + + /** + * Creates a new {@link FieldSortBuilder} from the query held by the {@link QueryParseContext} in + * {@link org.elasticsearch.common.xcontent.XContent} format. + * + * @param context the input parse context. The state on the parser contained in this context will be changed as a side effect of this + * method call + * @param fieldName in some sort syntax variations the field name precedes the xContent object that specifies further parameters, e.g. + * in '{ "foo": { "order" : "asc"} }'. When parsing the inner object, the field name can be passed in via this argument + */ + public static FieldSortBuilder fromXContent(QueryParseContext context, String fieldName) throws IOException { + XContentParser parser = context.parser(); + + QueryBuilder nestedFilter = null; + String nestedPath = null; + Object missing = null; + SortOrder order = null; + SortMode sortMode = null; + String unmappedType = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if (context.parseFieldMatcher().match(currentFieldName, NESTED_FILTER)) { + nestedFilter = context.parseInnerQueryBuilder(); + } else { + throw new ParsingException(parser.getTokenLocation(), "Expected " + NESTED_FILTER.getPreferredName() + " element."); + } + } else if (token.isValue()) { + if (context.parseFieldMatcher().match(currentFieldName, NESTED_PATH)) { + nestedPath = parser.text(); + } else if (context.parseFieldMatcher().match(currentFieldName, MISSING)) { + missing = parser.objectText(); + } else if (context.parseFieldMatcher().match(currentFieldName, REVERSE)) { + if (parser.booleanValue()) { + order = SortOrder.DESC; + } + // else we keep the default ASC + } else if (context.parseFieldMatcher().match(currentFieldName, ORDER)) { + String sortOrder = parser.text(); + if ("asc".equals(sortOrder)) { + order = SortOrder.ASC; + } else if ("desc".equals(sortOrder)) { + order = SortOrder.DESC; + } else { + throw new IllegalStateException("Sort order " + sortOrder + " not supported."); + } + } else if (context.parseFieldMatcher().match(currentFieldName, SORT_MODE)) { + sortMode = 
SortMode.fromString(parser.text()); + } else if (context.parseFieldMatcher().match(currentFieldName, UNMAPPED_TYPE)) { + unmappedType = parser.text(); + } else { + throw new IllegalArgumentException("Option " + currentFieldName + " not supported."); + } + } + } + + FieldSortBuilder builder = new FieldSortBuilder(fieldName); + if (nestedFilter != null) { + builder.setNestedFilter(nestedFilter); + } + if (nestedPath != null) { + builder.setNestedPath(nestedPath); + } + if (missing != null) { + builder.missing(missing); + } + if (order != null) { + builder.order(order); + } + if (sortMode != null) { + builder.sortMode(sortMode); + } + if (unmappedType != null) { + builder.unmappedType(unmappedType); + } return builder; } } diff --git a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index e37eed61c6d..4a8bd6d1d9a 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -19,19 +19,35 @@ package org.elasticsearch.search.sort; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.FieldComparator; +import org.apache.lucene.search.SortField; +import org.apache.lucene.util.BitSet; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.geo.GeoDistance; +import org.elasticsearch.common.geo.GeoDistance.FixedSourceDistance; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; -import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.DistanceUnit; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; +import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; +import org.elasticsearch.index.fielddata.MultiGeoPointValues; +import org.elasticsearch.index.fielddata.NumericDoubleValues; +import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; +import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.MultiValueMode; import java.io.IOException; @@ -44,27 +60,31 @@ import java.util.Objects; /** * A geo distance based sorting on a geo point like field. 
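+ * (Illustrative usage, field name and coordinates hypothetical:
+ * {@code new GeoDistanceSortBuilder("pin.location", new GeoPoint(40.7, -74.0)).unit(DistanceUnit.KILOMETERS).order(SortOrder.ASC)}
+ * sorts documents by ascending distance from the given point.)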
*/ -public class GeoDistanceSortBuilder extends SortBuilder - implements ToXContent, NamedWriteable, SortElementParserTemp { +public class GeoDistanceSortBuilder extends SortBuilder { public static final String NAME = "_geo_distance"; + public static final String ALTERNATIVE_NAME = "_geoDistance"; public static final boolean DEFAULT_COERCE = false; public static final boolean DEFAULT_IGNORE_MALFORMED = false; - - static final GeoDistanceSortBuilder PROTOTYPE = new GeoDistanceSortBuilder("", -1, -1); + public static final ParseField UNIT_FIELD = new ParseField("unit"); + public static final ParseField REVERSE_FIELD = new ParseField("reverse"); + public static final ParseField DISTANCE_TYPE_FIELD = new ParseField("distance_type"); + public static final ParseField COERCE_FIELD = new ParseField("coerce", "normalize"); + public static final ParseField IGNORE_MALFORMED_FIELD = new ParseField("ignore_malformed"); + public static final ParseField SORTMODE_FIELD = new ParseField("mode", "sort_mode"); + public static final ParseField NESTED_PATH_FIELD = new ParseField("nested_path"); + public static final ParseField NESTED_FILTER_FIELD = new ParseField("nested_filter"); private final String fieldName; private final List points = new ArrayList<>(); private GeoDistance geoDistance = GeoDistance.DEFAULT; private DistanceUnit unit = DistanceUnit.DEFAULT; - private SortOrder order = SortOrder.ASC; - - // TODO there is an enum that covers that parameter which we should be using here - private String sortMode = null; + + private SortMode sortMode = null; @SuppressWarnings("rawtypes") private QueryBuilder nestedFilter; private String nestedPath; - + // TODO switch to GeoValidationMethod enum private boolean coerce = DEFAULT_COERCE; private boolean ignoreMalformed = DEFAULT_IGNORE_MALFORMED; @@ -109,7 +129,7 @@ public class GeoDistanceSortBuilder extends SortBuilder } this.fieldName = fieldName; } - + /** * Copy constructor. * */ @@ -125,7 +145,38 @@ public class GeoDistanceSortBuilder extends SortBuilder this.coerce = original.coerce; this.ignoreMalformed = original.ignoreMalformed; } - + + /** + * Read from a stream. + */ + @SuppressWarnings("unchecked") + public GeoDistanceSortBuilder(StreamInput in) throws IOException { + fieldName = in.readString(); + points.addAll((List) in.readGenericValue()); + geoDistance = GeoDistance.readGeoDistanceFrom(in); + unit = DistanceUnit.readFromStream(in); + order = SortOrder.readFromStream(in); + sortMode = in.readOptionalWriteable(SortMode::readFromStream); + nestedFilter = in.readOptionalQuery(); + nestedPath = in.readOptionalString(); + coerce = in.readBoolean(); + ignoreMalformed = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(fieldName); + out.writeGenericValue(points); + geoDistance.writeTo(out); + unit.writeTo(out); + order.writeTo(out); + out.writeOptionalWriteable(sortMode); + out.writeOptionalQuery(nestedFilter); + out.writeOptionalString(nestedPath); + out.writeBoolean(coerce); + out.writeBoolean(ignoreMalformed); + } + /** * Returns the geo point like field the distance based sort operates on. * */ @@ -153,7 +204,7 @@ public class GeoDistanceSortBuilder extends SortBuilder this.points.addAll(Arrays.asList(points)); return this; } - + /** * Returns the points to create the range distance facets from. */ @@ -163,7 +214,7 @@ public class GeoDistanceSortBuilder extends SortBuilder /** * The geohash of the geo point to create the range distance facets from. 
- * + * * Deprecated - please use points(GeoPoint... points) instead. */ @Deprecated @@ -173,7 +224,7 @@ public class GeoDistanceSortBuilder extends SortBuilder } return this; } - + /** * The geo distance type used to compute the distance. */ @@ -181,7 +232,7 @@ public class GeoDistanceSortBuilder extends SortBuilder this.geoDistance = geoDistance; return this; } - + /** * Returns the geo distance type used to compute the distance. */ @@ -204,37 +255,13 @@ public class GeoDistanceSortBuilder extends SortBuilder return this.unit; } - /** - * The order of sorting. Defaults to {@link SortOrder#ASC}. - */ - @Override - public GeoDistanceSortBuilder order(SortOrder order) { - this.order = order; - return this; - } - - /** Returns the order of sorting. */ - public SortOrder order() { - return this.order; - } - - /** - * Not relevant. - * - * TODO should this throw an exception rather than silently ignore a parameter that is not used? - */ - @Override - public GeoDistanceSortBuilder missing(Object missing) { - return this; - } - /** * Defines which distance to use for sorting in the case a document contains multiple geo points. * Possible values: min and max */ - public GeoDistanceSortBuilder sortMode(String sortMode) { - MultiValueMode temp = MultiValueMode.fromString(sortMode); - if (temp == MultiValueMode.SUM) { + public GeoDistanceSortBuilder sortMode(SortMode sortMode) { + Objects.requireNonNull(sortMode, "sort mode cannot be null"); + if (sortMode == SortMode.SUM) { throw new IllegalArgumentException("sort_mode [sum] isn't supported for sorting by geo distance"); } this.sortMode = sortMode; @@ -242,7 +269,7 @@ public class GeoDistanceSortBuilder extends SortBuilder } /** Returns which distance to use for sorting in the case a document contains multiple geo points. */ - public String sortMode() { + public SortMode sortMode() { return this.sortMode; } @@ -250,16 +277,16 @@ public class GeoDistanceSortBuilder extends SortBuilder * Sets the nested filter that the nested objects should match with in order to be taken into account * for sorting. */ - public GeoDistanceSortBuilder setNestedFilter(QueryBuilder nestedFilter) { + public GeoDistanceSortBuilder setNestedFilter(QueryBuilder nestedFilter) { this.nestedFilter = nestedFilter; return this; } - /** + /** * Returns the nested filter that the nested objects should match with in order to be taken into account - * for sorting. + * for sorting. **/ - public QueryBuilder getNestedFilter() { + public QueryBuilder getNestedFilter() { return this.nestedFilter; } @@ -271,7 +298,7 @@ public class GeoDistanceSortBuilder extends SortBuilder this.nestedPath = nestedPath; return this; } - + /** * Returns the nested path if sorting occurs on a field that is inside a nested object. By default when sorting on a * field inside a nested object, the nearest upper nested object is selected as nested path. 
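A quick sketch of the SortMode guard introduced above. Field name and coordinates are hypothetical, not taken from this patch; the classes come from this package and org.elasticsearch.common.geo:

    GeoDistanceSortBuilder geoSort = new GeoDistanceSortBuilder("pin.location", new GeoPoint(52.52, 13.40));
    geoSort.sortMode(SortMode.MAX);    // any mode except SUM passes the guard
    // geoSort.sortMode(SortMode.SUM) would throw IllegalArgumentException:
    // "sort_mode [sum] isn't supported for sorting by geo distance"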
@@ -295,13 +322,14 @@ public class GeoDistanceSortBuilder extends SortBuilder } return this; } - + public boolean ignoreMalformed() { return this.ignoreMalformed; } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); builder.startObject(NAME); builder.startArray(fieldName); @@ -310,27 +338,24 @@ public class GeoDistanceSortBuilder extends SortBuilder } builder.endArray(); - builder.field("unit", unit); - builder.field("distance_type", geoDistance.name().toLowerCase(Locale.ROOT)); - if (order == SortOrder.DESC) { - builder.field("reverse", true); - } else { - builder.field("reverse", false); - } + builder.field(UNIT_FIELD.getPreferredName(), unit); + builder.field(DISTANCE_TYPE_FIELD.getPreferredName(), geoDistance.name().toLowerCase(Locale.ROOT)); + builder.field(ORDER_FIELD.getPreferredName(), order); if (sortMode != null) { - builder.field("mode", sortMode); + builder.field(SORTMODE_FIELD.getPreferredName(), sortMode); } if (nestedPath != null) { - builder.field("nested_path", nestedPath); + builder.field(NESTED_PATH_FIELD.getPreferredName(), nestedPath); } if (nestedFilter != null) { - builder.field("nested_filter", nestedFilter, params); + builder.field(NESTED_FILTER_FIELD.getPreferredName(), nestedFilter, params); } - builder.field("coerce", coerce); - builder.field("ignore_malformed", ignoreMalformed); + builder.field(COERCE_FIELD.getPreferredName(), coerce); + builder.field(IGNORE_MALFORMED_FIELD.getPreferredName(), ignoreMalformed); + builder.endObject(); builder.endObject(); return builder; } @@ -369,59 +394,25 @@ public class GeoDistanceSortBuilder extends SortBuilder this.unit, this.sortMode, this.order, this.nestedFilter, this.nestedPath, this.coerce, this.ignoreMalformed); } - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(fieldName); - out.writeGenericValue(points); - - geoDistance.writeTo(out); - unit.writeTo(out); - order.writeTo(out); - out.writeOptionalString(sortMode); - if (nestedFilter != null) { - out.writeBoolean(true); - out.writeQuery(nestedFilter); - } else { - out.writeBoolean(false); - } - out.writeOptionalString(nestedPath); - out.writeBoolean(coerce); - out.writeBoolean(ignoreMalformed); - } - - @Override - public GeoDistanceSortBuilder readFrom(StreamInput in) throws IOException { - String fieldName = in.readString(); - - ArrayList points = (ArrayList) in.readGenericValue(); - GeoDistanceSortBuilder result = new GeoDistanceSortBuilder(fieldName, points.toArray(new GeoPoint[points.size()])); - - result.geoDistance(GeoDistance.readGeoDistanceFrom(in)); - result.unit(DistanceUnit.readDistanceUnit(in)); - result.order(SortOrder.readOrderFrom(in)); - String sortMode = in.readOptionalString(); - if (sortMode != null) { - result.sortMode(sortMode); - } - if (in.readBoolean()) { - result.setNestedFilter(in.readQuery()); - } - result.setNestedPath(in.readOptionalString()); - result.coerce(in.readBoolean()); - result.ignoreMalformed(in.readBoolean()); - return result; - } - - @Override - public GeoDistanceSortBuilder fromXContent(QueryParseContext context, String elementName) throws IOException { + /** + * Creates a new {@link GeoDistanceSortBuilder} from the query held by the {@link QueryParseContext} in + * {@link org.elasticsearch.common.xcontent.XContent} format. + * + * @param context the input parse context. 
The state on the parser contained in this context will be changed as a side effect of this + * method call + * @param elementName in some sort syntax variations the field name precedes the xContent object that specifies further parameters, e.g. + * in '{ "foo": { "order" : "asc"} }'. When parsing the inner object, the field name can be passed in via this argument + */ + public static GeoDistanceSortBuilder fromXContent(QueryParseContext context, String elementName) throws IOException { XContentParser parser = context.parser(); + ParseFieldMatcher parseFieldMatcher = context.parseFieldMatcher(); String fieldName = null; List geoPoints = new ArrayList<>(); DistanceUnit unit = DistanceUnit.DEFAULT; GeoDistance geoDistance = GeoDistance.DEFAULT; - boolean reverse = false; - MultiValueMode sortMode = null; - QueryBuilder nestedFilter = null; + SortOrder order = SortOrder.ASC; + SortMode sortMode = null; + QueryBuilder nestedFilter = null; String nestedPath = null; boolean coerce = GeoDistanceSortBuilder.DEFAULT_COERCE; @@ -437,40 +428,37 @@ public class GeoDistanceSortBuilder extends SortBuilder fieldName = currentName; } else if (token == XContentParser.Token.START_OBJECT) { - // the json in the format of -> field : { lat : 30, lon : 12 } - if ("nested_filter".equals(currentName) || "nestedFilter".equals(currentName)) { - // TODO Note to remember: while this is kept as a QueryBuilder internally, - // we need to make sure to call toFilter() on it once on the shard - // (e.g. in the new build() method) + if (parseFieldMatcher.match(currentName, NESTED_FILTER_FIELD)) { nestedFilter = context.parseInnerQueryBuilder(); } else { + // the json in the format of -> field : { lat : 30, lon : 12 } fieldName = currentName; GeoPoint point = new GeoPoint(); GeoUtils.parseGeoPoint(parser, point); geoPoints.add(point); } } else if (token.isValue()) { - if ("reverse".equals(currentName)) { - reverse = parser.booleanValue(); - } else if ("order".equals(currentName)) { - reverse = "desc".equals(parser.text()); - } else if ("unit".equals(currentName)) { + if (parseFieldMatcher.match(currentName, REVERSE_FIELD)) { + order = parser.booleanValue() ? 
SortOrder.DESC : SortOrder.ASC; + } else if (parseFieldMatcher.match(currentName, ORDER_FIELD)) { + order = SortOrder.fromString(parser.text()); + } else if (parseFieldMatcher.match(currentName, UNIT_FIELD)) { unit = DistanceUnit.fromString(parser.text()); - } else if ("distance_type".equals(currentName) || "distanceType".equals(currentName)) { + } else if (parseFieldMatcher.match(currentName, DISTANCE_TYPE_FIELD)) { geoDistance = GeoDistance.fromString(parser.text()); - } else if ("coerce".equals(currentName) || "normalize".equals(currentName)) { + } else if (parseFieldMatcher.match(currentName, COERCE_FIELD)) { coerce = parser.booleanValue(); if (coerce == true) { ignoreMalformed = true; } - } else if ("ignore_malformed".equals(currentName)) { + } else if (parseFieldMatcher.match(currentName, IGNORE_MALFORMED_FIELD)) { boolean ignore_malformed_value = parser.booleanValue(); if (coerce == false) { ignoreMalformed = ignore_malformed_value; } - } else if ("sort_mode".equals(currentName) || "sortMode".equals(currentName) || "mode".equals(currentName)) { - sortMode = MultiValueMode.fromString(parser.text()); - } else if ("nested_path".equals(currentName) || "nestedPath".equals(currentName)) { + } else if (parseFieldMatcher.match(currentName, SORTMODE_FIELD)) { + sortMode = SortMode.fromString(parser.text()); + } else if (parseFieldMatcher.match(currentName, NESTED_PATH_FIELD)) { nestedPath = parser.text(); } else { GeoPoint point = new GeoPoint(); @@ -484,20 +472,94 @@ public class GeoDistanceSortBuilder extends SortBuilder GeoDistanceSortBuilder result = new GeoDistanceSortBuilder(fieldName, geoPoints.toArray(new GeoPoint[geoPoints.size()])); result.geoDistance(geoDistance); result.unit(unit); - if (reverse) { - result.order(SortOrder.DESC); - } else { - result.order(SortOrder.ASC); - } + result.order(order); if (sortMode != null) { - result.sortMode(sortMode.name()); + result.sortMode(sortMode); } result.setNestedFilter(nestedFilter); result.setNestedPath(nestedPath); result.coerce(coerce); result.ignoreMalformed(ignoreMalformed); return result; + } + @Override + public SortField build(QueryShardContext context) throws IOException { + final boolean indexCreatedBeforeV2_0 = context.indexVersionCreated().before(Version.V_2_0_0); + // validation was not available prior to 2.x, so to support bwc percolation queries we only ignore_malformed on 2.x created indexes + List localPoints = new ArrayList(); + for (GeoPoint geoPoint : this.points) { + localPoints.add(new GeoPoint(geoPoint)); + } + + if (!indexCreatedBeforeV2_0 && !ignoreMalformed) { + for (GeoPoint point : localPoints) { + if (GeoUtils.isValidLatitude(point.lat()) == false) { + throw new ElasticsearchParseException("illegal latitude value [{}] for [GeoDistanceSort]", point.lat()); + } + if (GeoUtils.isValidLongitude(point.lon()) == false) { + throw new ElasticsearchParseException("illegal longitude value [{}] for [GeoDistanceSort]", point.lon()); + } + } + } + + if (coerce) { + for (GeoPoint point : localPoints) { + GeoUtils.normalizePoint(point, coerce, coerce); + } + } + + boolean reverse = (order == SortOrder.DESC); + final MultiValueMode finalSortMode; + if (sortMode == null) { + finalSortMode = reverse ? 
MultiValueMode.MAX : MultiValueMode.MIN; + } else { + finalSortMode = MultiValueMode.fromString(sortMode.toString()); + } + + MappedFieldType fieldType = context.fieldMapper(fieldName); + if (fieldType == null) { + throw new IllegalArgumentException("failed to find mapper for [" + fieldName + "] for geo distance based sort"); + } + final IndexGeoPointFieldData geoIndexFieldData = context.getForField(fieldType); + final FixedSourceDistance[] distances = new FixedSourceDistance[localPoints.size()]; + for (int i = 0; i< localPoints.size(); i++) { + distances[i] = geoDistance.fixedSourceDistance(localPoints.get(i).lat(), localPoints.get(i).lon(), unit); + } + + final Nested nested = resolveNested(context, nestedPath, nestedFilter); + + IndexFieldData.XFieldComparatorSource geoDistanceComparatorSource = new IndexFieldData.XFieldComparatorSource() { + + @Override + public SortField.Type reducedType() { + return SortField.Type.DOUBLE; + } + + @Override + public FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException { + return new FieldComparator.DoubleComparator(numHits, null, null) { + @Override + protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException { + final MultiGeoPointValues geoPointValues = geoIndexFieldData.load(context).getGeoPointValues(); + final SortedNumericDoubleValues distanceValues = GeoDistance.distanceValues(geoPointValues, distances); + final NumericDoubleValues selectedValues; + if (nested == null) { + selectedValues = finalSortMode.select(distanceValues, Double.MAX_VALUE); + } else { + final BitSet rootDocs = nested.rootDocs(context); + final DocIdSetIterator innerDocs = nested.innerDocs(context); + selectedValues = finalSortMode.select(distanceValues, Double.MAX_VALUE, rootDocs, innerDocs, + context.reader().maxDoc()); + } + return selectedValues.getRawDoubleValues(); + } + }; + } + + }; + + return new SortField(fieldName, geoDistanceComparatorSource, reverse); } static void parseGeoPoints(XContentParser parser, List geoPoints) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java deleted file mode 100644 index 27c8b8e0ed5..00000000000 --- a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java +++ /dev/null @@ -1,216 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.search.sort; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.NumericDocValues; -import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.search.FieldComparator; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.SortField; -import org.apache.lucene.search.join.BitSetProducer; -import org.apache.lucene.util.BitSet; -import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; -import org.elasticsearch.common.geo.GeoDistance; -import org.elasticsearch.common.geo.GeoDistance.FixedSourceDistance; -import org.elasticsearch.common.geo.GeoPoint; -import org.elasticsearch.common.geo.GeoUtils; -import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.common.unit.DistanceUnit; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.fielddata.IndexFieldData; -import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; -import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; -import org.elasticsearch.index.fielddata.MultiGeoPointValues; -import org.elasticsearch.index.fielddata.NumericDoubleValues; -import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport; -import org.elasticsearch.search.MultiValueMode; -import org.elasticsearch.search.internal.SearchContext; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -/** - * - */ -public class GeoDistanceSortParser implements SortParser { - - @Override - public String[] names() { - return new String[]{"_geo_distance", "_geoDistance"}; - } - - @Override - public SortField parse(XContentParser parser, SearchContext context) throws Exception { - String fieldName = null; - List geoPoints = new ArrayList<>(); - DistanceUnit unit = DistanceUnit.DEFAULT; - GeoDistance geoDistance = GeoDistance.DEFAULT; - boolean reverse = false; - MultiValueMode sortMode = null; - NestedInnerQueryParseSupport nestedHelper = null; - - final boolean indexCreatedBeforeV2_0 = context.indexShard().indexSettings().getIndexVersionCreated().before(Version.V_2_0_0); - boolean coerce = GeoDistanceSortBuilder.DEFAULT_COERCE; - boolean ignoreMalformed = GeoDistanceSortBuilder.DEFAULT_IGNORE_MALFORMED; - - XContentParser.Token token; - String currentName = parser.currentName(); - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentName = parser.currentName(); - } else if (token == XContentParser.Token.START_ARRAY) { - GeoDistanceSortBuilder.parseGeoPoints(parser, geoPoints); - - fieldName = currentName; - } else if (token == XContentParser.Token.START_OBJECT) { - // the json in the format of -> field : { lat : 30, lon : 12 } - if ("nested_filter".equals(currentName) || "nestedFilter".equals(currentName)) { - if (nestedHelper == null) { - nestedHelper = new NestedInnerQueryParseSupport(parser, context); - } - nestedHelper.filter(); - } else { - fieldName = currentName; - GeoPoint point = new GeoPoint(); - GeoUtils.parseGeoPoint(parser, point); - geoPoints.add(point); - } - } else if (token.isValue()) { - if ("reverse".equals(currentName)) { - reverse = parser.booleanValue(); - } else if ("order".equals(currentName)) { - reverse = "desc".equals(parser.text()); - } else if (currentName.equals("unit")) { - unit = 
DistanceUnit.fromString(parser.text()); - } else if (currentName.equals("distance_type") || currentName.equals("distanceType")) { - geoDistance = GeoDistance.fromString(parser.text()); - } else if ("coerce".equals(currentName) || (indexCreatedBeforeV2_0 && "normalize".equals(currentName))) { - coerce = parser.booleanValue(); - if (coerce == true) { - ignoreMalformed = true; - } - } else if ("ignore_malformed".equals(currentName) && coerce == false) { - ignoreMalformed = parser.booleanValue(); - } else if ("sort_mode".equals(currentName) || "sortMode".equals(currentName) || "mode".equals(currentName)) { - sortMode = MultiValueMode.fromString(parser.text()); - } else if ("nested_path".equals(currentName) || "nestedPath".equals(currentName)) { - if (nestedHelper == null) { - nestedHelper = new NestedInnerQueryParseSupport(parser, context); - } - nestedHelper.setPath(parser.text()); - } else { - GeoPoint point = new GeoPoint(); - point.resetFromString(parser.text()); - geoPoints.add(point); - fieldName = currentName; - } - } - } - - // validation was not available prior to 2.x, so to support bwc percolation queries we only ignore_malformed on 2.x created indexes - if (!indexCreatedBeforeV2_0 && !ignoreMalformed) { - for (GeoPoint point : geoPoints) { - if (point.lat() > 90.0 || point.lat() < -90.0) { - throw new ElasticsearchParseException("illegal latitude value [{}] for [GeoDistanceSort]", point.lat()); - } - if (point.lon() > 180.0 || point.lon() < -180) { - throw new ElasticsearchParseException("illegal longitude value [{}] for [GeoDistanceSort]", point.lon()); - } - } - } - - if (coerce) { - for (GeoPoint point : geoPoints) { - GeoUtils.normalizePoint(point, coerce, coerce); - } - } - - if (sortMode == null) { - sortMode = reverse ? MultiValueMode.MAX : MultiValueMode.MIN; - } - - if (sortMode == MultiValueMode.SUM) { - throw new IllegalArgumentException("sort_mode [sum] isn't supported for sorting by geo distance"); - } - - MappedFieldType fieldType = context.smartNameFieldType(fieldName); - if (fieldType == null) { - throw new IllegalArgumentException("failed to find mapper for [" + fieldName + "] for geo distance based sort"); - } - final MultiValueMode finalSortMode = sortMode; // final reference for use in the anonymous class - final IndexGeoPointFieldData geoIndexFieldData = context.fieldData().getForField(fieldType); - final FixedSourceDistance[] distances = new FixedSourceDistance[geoPoints.size()]; - for (int i = 0; i< geoPoints.size(); i++) { - distances[i] = geoDistance.fixedSourceDistance(geoPoints.get(i).lat(), geoPoints.get(i).lon(), unit); - } - - final Nested nested; - if (nestedHelper != null && nestedHelper.getPath() != null) { - BitSetProducer rootDocumentsFilter = context.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter()); - Query innerDocumentsFilter; - if (nestedHelper.filterFound()) { - // TODO: use queries instead - innerDocumentsFilter = nestedHelper.getInnerFilter(); - } else { - innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); - } - nested = new Nested(rootDocumentsFilter, context.searcher().createNormalizedWeight(innerDocumentsFilter, false)); - } else { - nested = null; - } - - IndexFieldData.XFieldComparatorSource geoDistanceComparatorSource = new IndexFieldData.XFieldComparatorSource() { - - @Override - public SortField.Type reducedType() { - return SortField.Type.DOUBLE; - } - - @Override - public FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException { - 
return new FieldComparator.DoubleComparator(numHits, null, null) { - @Override - protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException { - final MultiGeoPointValues geoPointValues = geoIndexFieldData.load(context).getGeoPointValues(); - final SortedNumericDoubleValues distanceValues = GeoDistance.distanceValues(geoPointValues, distances); - final NumericDoubleValues selectedValues; - if (nested == null) { - selectedValues = finalSortMode.select(distanceValues, Double.MAX_VALUE); - } else { - final BitSet rootDocs = nested.rootDocs(context); - final DocIdSetIterator innerDocs = nested.innerDocs(context); - selectedValues = finalSortMode.select(distanceValues, Double.MAX_VALUE, rootDocs, innerDocs, context.reader().maxDoc()); - } - return selectedValues.getRawDoubleValues(); - } - }; - } - - }; - - return new SortField(fieldName, geoDistanceComparatorSource, reverse); - } - -} diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java index 7435ff95f45..271c94c59ad 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java @@ -19,40 +19,126 @@ package org.elasticsearch.search.sort; +import org.apache.lucene.search.SortField; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; +import java.util.Objects; /** * A sort builder allowing to sort by score. - * - * */ -public class ScoreSortBuilder extends SortBuilder { +public class ScoreSortBuilder extends SortBuilder { - private SortOrder order; + public static final String NAME = "_score"; + public static final ParseField REVERSE_FIELD = new ParseField("reverse"); + public static final ParseField ORDER_FIELD = new ParseField("order"); + private static final SortField SORT_SCORE = new SortField(null, SortField.Type.SCORE); + private static final SortField SORT_SCORE_REVERSE = new SortField(null, SortField.Type.SCORE, true); /** - * The order of sort scoring. By default, its {@link SortOrder#DESC}. + * Build a ScoreSortBuilder default to descending sort order. */ - @Override - public ScoreSortBuilder order(SortOrder order) { - this.order = order; - return this; + public ScoreSortBuilder() { + // order defaults to desc when sorting on the _score + order(SortOrder.DESC); + } + + /** + * Read from a stream. 
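+     * (Illustrative: a builder written with {@code writeTo(out)} and read back
+     * here compares equal to the original, since {@code order} is the only
+     * serialized state.)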
+ */ + public ScoreSortBuilder(StreamInput in) throws IOException { + order(SortOrder.readFromStream(in)); } @Override - public SortBuilder missing(Object missing) { - return this; + public void writeTo(StreamOutput out) throws IOException { + order.writeTo(out); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject("_score"); - if (order == SortOrder.ASC) { - builder.field("reverse", true); - } + builder.startObject(); + builder.startObject(NAME); + builder.field(ORDER_FIELD.getPreferredName(), order); + builder.endObject(); builder.endObject(); return builder; } + + /** + * Creates a new {@link ScoreSortBuilder} from the query held by the {@link QueryParseContext} in + * {@link org.elasticsearch.common.xcontent.XContent} format. + * + * @param context the input parse context. The state on the parser contained in this context will be changed as a side effect of this + * method call + * @param fieldName in some sort syntax variations the field name precedes the xContent object that specifies further parameters, e.g. + * in '{ "foo": { "order" : "asc"} }'. When parsing the inner object, the field name can be passed in via this argument + */ + public static ScoreSortBuilder fromXContent(QueryParseContext context, String fieldName) throws IOException { + XContentParser parser = context.parser(); + ParseFieldMatcher matcher = context.parseFieldMatcher(); + + XContentParser.Token token; + String currentName = parser.currentName(); + ScoreSortBuilder result = new ScoreSortBuilder(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentName = parser.currentName(); + } else if (token.isValue()) { + if (matcher.match(currentName, REVERSE_FIELD)) { + if (parser.booleanValue()) { + result.order(SortOrder.ASC); + } + // else we keep the default DESC + } else if (matcher.match(currentName, ORDER_FIELD)) { + result.order(SortOrder.fromString(parser.text())); + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] failed to parse field [" + currentName + "]"); + } + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] unexpected token [" + token + "]"); + } + } + return result; + } + + @Override + public SortField build(QueryShardContext context) { + if (order == SortOrder.DESC) { + return SORT_SCORE; + } else { + return SORT_SCORE_REVERSE; + } + } + + @Override + public boolean equals(Object object) { + if (this == object) { + return true; + } + if (object == null || getClass() != object.getClass()) { + return false; + } + ScoreSortBuilder other = (ScoreSortBuilder) object; + return Objects.equals(order, other.order); + } + + @Override + public int hashCode() { + return Objects.hash(this.order); + } + + @Override + public String getWriteableName() { + return NAME; + } } diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java index e9a9c8df57c..2751d497519 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java @@ -19,26 +19,68 @@ package org.elasticsearch.search.sort; +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.SortField; +import org.apache.lucene.util.BytesRef; 
+import org.apache.lucene.util.BytesRefBuilder; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.fielddata.FieldData; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; +import org.elasticsearch.index.fielddata.NumericDoubleValues; +import org.elasticsearch.index.fielddata.SortedBinaryDocValues; +import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; +import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; +import org.elasticsearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.query.QueryShardException; +import org.elasticsearch.script.LeafSearchScript; import org.elasticsearch.script.Script; +import org.elasticsearch.script.Script.ScriptField; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptParameterParser; +import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue; +import org.elasticsearch.script.SearchScript; +import org.elasticsearch.search.MultiValueMode; import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; /** * Script sort builder allows to sort based on a custom script expression. */ -public class ScriptSortBuilder extends SortBuilder { +public class ScriptSortBuilder extends SortBuilder { - private Script script; + public static final String NAME = "_script"; + public static final ParseField TYPE_FIELD = new ParseField("type"); + public static final ParseField SCRIPT_FIELD = new ParseField("script"); + public static final ParseField SORTMODE_FIELD = new ParseField("mode"); + public static final ParseField NESTED_PATH_FIELD = new ParseField("nested_path"); + public static final ParseField NESTED_FILTER_FIELD = new ParseField("nested_filter"); + public static final ParseField PARAMS_FIELD = new ParseField("params"); - private final String type; + private final Script script; - private SortOrder order; + private final ScriptSortType type; - private String sortMode; + private SortMode sortMode; - private QueryBuilder nestedFilter; + private QueryBuilder nestedFilter; private String nestedPath; @@ -47,47 +89,99 @@ public class ScriptSortBuilder extends SortBuilder { * * @param script * The script to use. + * @param type + * The type of the script, can be either {@link ScriptSortType#STRING} or + * {@link ScriptSortType#NUMBER} */ - public ScriptSortBuilder(Script script, String type) { + public ScriptSortBuilder(Script script, ScriptSortType type) { + Objects.requireNonNull(script, "script cannot be null"); + Objects.requireNonNull(type, "type cannot be null"); this.script = script; this.type = type; } - /** - * Sets the sort order. 
- */ - @Override - public ScriptSortBuilder order(SortOrder order) { - this.order = order; - return this; + ScriptSortBuilder(ScriptSortBuilder original) { + this.script = original.script; + this.type = original.type; + this.order = original.order; + this.sortMode = original.sortMode; + this.nestedFilter = original.nestedFilter; + this.nestedPath = original.nestedPath; } /** - * Not really relevant. + * Read from a stream. */ + public ScriptSortBuilder(StreamInput in) throws IOException { + script = Script.readScript(in); + type = ScriptSortType.readFromStream(in); + order = SortOrder.readFromStream(in); + sortMode = in.readOptionalWriteable(SortMode::readFromStream); + nestedPath = in.readOptionalString(); + nestedFilter = in.readOptionalQuery(); + } + @Override - public SortBuilder missing(Object missing) { - return this; + public void writeTo(StreamOutput out) throws IOException { + script.writeTo(out); + type.writeTo(out); + order.writeTo(out); + out.writeOptionalWriteable(sortMode); + out.writeOptionalString(nestedPath); + out.writeOptionalQuery(nestedFilter); } /** - * Defines which distance to use for sorting in the case a document contains multiple geo points. - * Possible values: min and max + * Get the script used in this sort. */ - public ScriptSortBuilder sortMode(String sortMode) { + public Script script() { + return this.script; + } + + /** + * Get the type used in this sort. + */ + public ScriptSortType type() { + return this.type; + } + + /** + * Defines which distance to use for sorting in the case a document contains multiple values.
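+     * (Illustrative: a numeric script sort may use {@code sortMode(SortMode.AVG)}
+     * to average the script's values per document.)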
    + * For {@link ScriptSortType#STRING}, the set of possible values is restricted to {@link SortMode#MIN} and {@link SortMode#MAX} + */ + public ScriptSortBuilder sortMode(SortMode sortMode) { + Objects.requireNonNull(sortMode, "sort mode cannot be null."); + if (ScriptSortType.STRING.equals(type) && (sortMode == SortMode.SUM || sortMode == SortMode.AVG || + sortMode == SortMode.MEDIAN)) { + throw new IllegalArgumentException("script sort of type [string] doesn't support mode [" + sortMode + "]"); + } this.sortMode = sortMode; return this; } + /** + * Get the sort mode. + */ + public SortMode sortMode() { + return this.sortMode; + } + /** * Sets the nested filter that the nested objects should match with in order to be taken into account * for sorting. */ - public ScriptSortBuilder setNestedFilter(QueryBuilder nestedFilter) { + public ScriptSortBuilder setNestedFilter(QueryBuilder nestedFilter) { this.nestedFilter = nestedFilter; return this; } + /** + * Gets the nested filter. + */ + public QueryBuilder getNestedFilter() { + return this.nestedFilter; + } + /** * Sets the nested path if sorting occurs on a field that is inside a nested object. For sorting by script this * needs to be specified. @@ -97,24 +191,248 @@ public class ScriptSortBuilder extends SortBuilder { return this; } + /** + * Gets the nested path. + */ + public String getNestedPath() { + return this.nestedPath; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params builderParams) throws IOException { - builder.startObject("_script"); - builder.field("script", script); - builder.field("type", type); - if (order == SortOrder.DESC) { - builder.field("reverse", true); - } + builder.startObject(); + builder.startObject(NAME); + builder.field(SCRIPT_FIELD.getPreferredName(), script); + builder.field(TYPE_FIELD.getPreferredName(), type); + builder.field(ORDER_FIELD.getPreferredName(), order); if (sortMode != null) { - builder.field("mode", sortMode); + builder.field(SORTMODE_FIELD.getPreferredName(), sortMode); } if (nestedPath != null) { - builder.field("nested_path", nestedPath); + builder.field(NESTED_PATH_FIELD.getPreferredName(), nestedPath); } if (nestedFilter != null) { - builder.field("nested_filter", nestedFilter, builderParams); + builder.field(NESTED_FILTER_FIELD.getPreferredName(), nestedFilter, builderParams); } builder.endObject(); + builder.endObject(); return builder; } + + /** + * Creates a new {@link ScriptSortBuilder} from the query held by the {@link QueryParseContext} in + * {@link org.elasticsearch.common.xcontent.XContent} format. + * + * @param context the input parse context. The state on the parser contained in this context will be changed as a side effect of this + * method call + * @param elementName in some sort syntax variations the field name precedes the xContent object that specifies further parameters, e.g. + * in '{ "foo": { "order" : "asc"} }'. 
When parsing the inner object, the field name can be passed in via this argument + */ + public static ScriptSortBuilder fromXContent(QueryParseContext context, String elementName) throws IOException { + ScriptParameterParser scriptParameterParser = new ScriptParameterParser(); + XContentParser parser = context.parser(); + ParseFieldMatcher parseField = context.parseFieldMatcher(); + Script script = null; + ScriptSortType type = null; + SortMode sortMode = null; + SortOrder order = null; + QueryBuilder nestedFilter = null; + String nestedPath = null; + Map params = new HashMap<>(); + + XContentParser.Token token; + String currentName = parser.currentName(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if (parseField.match(currentName, ScriptField.SCRIPT)) { + script = Script.parse(parser, parseField); + } else if (parseField.match(currentName, PARAMS_FIELD)) { + params = parser.map(); + } else if (parseField.match(currentName, NESTED_FILTER_FIELD)) { + nestedFilter = context.parseInnerQueryBuilder(); + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] failed to parse field [" + currentName + "]"); + } + } else if (token.isValue()) { + if (parseField.match(currentName, ORDER_FIELD)) { + order = SortOrder.fromString(parser.text()); + } else if (scriptParameterParser.token(currentName, token, parser, parseField)) { + // Do nothing (handled by ScriptParameterParser) + } else if (parseField.match(currentName, TYPE_FIELD)) { + type = ScriptSortType.fromString(parser.text()); + } else if (parseField.match(currentName, SORTMODE_FIELD)) { + sortMode = SortMode.fromString(parser.text()); + } else if (parseField.match(currentName, NESTED_PATH_FIELD)) { + nestedPath = parser.text(); + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] failed to parse field [" + currentName + "]"); + } + } else { + throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] unexpected token [" + token + "]"); + } + } + + if (script == null) { // Didn't find anything using the new API so try using the old one instead + ScriptParameterValue scriptValue = scriptParameterParser.getDefaultScriptParameterValue(); + if (scriptValue != null) { + if (params == null) { + params = new HashMap<>(); + } + script = new Script(scriptValue.script(), scriptValue.scriptType(), scriptParameterParser.lang(), params); + } + } + + ScriptSortBuilder result = new ScriptSortBuilder(script, type); + if (order != null) { + result.order(order); + } + if (sortMode != null) { + result.sortMode(sortMode); + } + if (nestedFilter != null) { + result.setNestedFilter(nestedFilter); + } + if (nestedPath != null) { + result.setNestedPath(nestedPath); + } + return result; + } + + + @Override + public SortField build(QueryShardContext context) throws IOException { + final SearchScript searchScript = context.getScriptService().search( + context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap()); + + MultiValueMode valueMode = null; + if (sortMode != null) { + valueMode = MultiValueMode.fromString(sortMode.toString()); + } + boolean reverse = (order == SortOrder.DESC); + if (valueMode == null) { + valueMode = reverse ? 
MultiValueMode.MAX : MultiValueMode.MIN; + } + + final Nested nested = resolveNested(context, nestedPath, nestedFilter); + final IndexFieldData.XFieldComparatorSource fieldComparatorSource; + switch (type) { + case STRING: + fieldComparatorSource = new BytesRefFieldComparatorSource(null, null, valueMode, nested) { + LeafSearchScript leafScript; + @Override + protected SortedBinaryDocValues getValues(LeafReaderContext context) throws IOException { + leafScript = searchScript.getLeafSearchScript(context); + final BinaryDocValues values = new BinaryDocValues() { + final BytesRefBuilder spare = new BytesRefBuilder(); + @Override + public BytesRef get(int docID) { + leafScript.setDocument(docID); + spare.copyChars(leafScript.run().toString()); + return spare.get(); + } + }; + return FieldData.singleton(values, null); + } + @Override + protected void setScorer(Scorer scorer) { + leafScript.setScorer(scorer); + } + }; + break; + case NUMBER: + fieldComparatorSource = new DoubleValuesComparatorSource(null, Double.MAX_VALUE, valueMode, nested) { + LeafSearchScript leafScript; + @Override + protected SortedNumericDoubleValues getValues(LeafReaderContext context) throws IOException { + leafScript = searchScript.getLeafSearchScript(context); + final NumericDoubleValues values = new NumericDoubleValues() { + @Override + public double get(int docID) { + leafScript.setDocument(docID); + return leafScript.runAsDouble(); + } + }; + return FieldData.singleton(values, null); + } + @Override + protected void setScorer(Scorer scorer) { + leafScript.setScorer(scorer); + } + }; + break; + default: + throw new QueryShardException(context, "custom script sort type [" + type + "] not supported"); + } + + return new SortField("_script", fieldComparatorSource, reverse); + } + + @Override + public boolean equals(Object object) { + if (this == object) { + return true; + } + if (object == null || getClass() != object.getClass()) { + return false; + } + ScriptSortBuilder other = (ScriptSortBuilder) object; + return Objects.equals(script, other.script) && + Objects.equals(type, other.type) && + Objects.equals(order, other.order) && + Objects.equals(sortMode, other.sortMode) && + Objects.equals(nestedFilter, other.nestedFilter) && + Objects.equals(nestedPath, other.nestedPath); + } + + @Override + public int hashCode() { + return Objects.hash(script, type, order, sortMode, nestedFilter, nestedPath); + } + + @Override + public String getWriteableName() { + return NAME; + } + + public enum ScriptSortType implements Writeable { + /** script sort for a string value **/ + STRING, + /** script sort for a numeric value **/ + NUMBER; + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeVInt(ordinal()); + } + + /** + * Read from a stream. 
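+     * (Illustrative: the ordinal is written via {@code out.writeVInt(ordinal())},
+     * so {@code STRING} round-trips as 0 and {@code NUMBER} as 1; any other
+     * ordinal fails with an {@link IOException}.)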
+ */ + static ScriptSortType readFromStream(final StreamInput in) throws IOException { + int ordinal = in.readVInt(); + if (ordinal < 0 || ordinal >= values().length) { + throw new IOException("Unknown ScriptSortType ordinal [" + ordinal + "]"); + } + return values()[ordinal]; + } + + public static ScriptSortType fromString(final String str) { + Objects.requireNonNull(str, "input string is null"); + switch (str.toLowerCase(Locale.ROOT)) { + case ("string"): + return ScriptSortType.STRING; + case ("number"): + return ScriptSortType.NUMBER; + default: + throw new IllegalArgumentException("Unknown ScriptSortType [" + str + "]"); + } + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } } diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java deleted file mode 100644 index e4fe2c08f75..00000000000 --- a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
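The ScriptSortParser being deleted here built its Lucene SortField inline while parsing; its replacement splits parsing (fromXContent) from SortField construction (build), which also makes the builder usable directly from code. A minimal sketch of programmatic use, assuming an inline script against a hypothetical price field (the single-argument Script constructor and the setter names are taken from the builder shown above, not from this patch):

    import org.elasticsearch.script.Script;
    import org.elasticsearch.search.sort.ScriptSortBuilder;
    import org.elasticsearch.search.sort.ScriptSortBuilder.ScriptSortType;
    import org.elasticsearch.search.sort.SortBuilders;
    import org.elasticsearch.search.sort.SortMode;
    import org.elasticsearch.search.sort.SortOrder;

    public class ScriptSortExample {
        public static void main(String[] args) {
            // fromString is case-insensitive and only accepts "string" or "number"
            ScriptSortType type = ScriptSortType.fromString("number");
            ScriptSortBuilder sort = SortBuilders.scriptSort(new Script("doc['price'].value"), type);
            sort.order(SortOrder.DESC);   // defaults to SortOrder.ASC when not set
            sort.sortMode(SortMode.MAX);  // pick the highest script value per document
            // SortBuilder extends ToXContentToBytes, so toString() renders the JSON form
            System.out.println(sort);
        }
    }
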
- */ - -package org.elasticsearch.search.sort; - -import org.apache.lucene.index.BinaryDocValues; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.SortField; -import org.apache.lucene.search.join.BitSetProducer; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.BytesRefBuilder; -import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.fielddata.FieldData; -import org.elasticsearch.index.fielddata.IndexFieldData; -import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; -import org.elasticsearch.index.fielddata.NumericDoubleValues; -import org.elasticsearch.index.fielddata.SortedBinaryDocValues; -import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; -import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; -import org.elasticsearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource; -import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport; -import org.elasticsearch.script.LeafSearchScript; -import org.elasticsearch.script.Script; -import org.elasticsearch.script.Script.ScriptField; -import org.elasticsearch.script.ScriptContext; -import org.elasticsearch.script.ScriptParameterParser; -import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue; -import org.elasticsearch.script.SearchScript; -import org.elasticsearch.search.MultiValueMode; -import org.elasticsearch.search.SearchParseException; -import org.elasticsearch.search.internal.SearchContext; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -/** - * - */ -public class ScriptSortParser implements SortParser { - - private static final String STRING_SORT_TYPE = "string"; - private static final String NUMBER_SORT_TYPE = "number"; - - @Override - public String[] names() { - return new String[]{"_script"}; - } - - @Override - public SortField parse(XContentParser parser, SearchContext context) throws Exception { - ScriptParameterParser scriptParameterParser = new ScriptParameterParser(); - Script script = null; - String type = null; - Map<String, Object> params = null; - boolean reverse = false; - MultiValueMode sortMode = null; - NestedInnerQueryParseSupport nestedHelper = null; - - XContentParser.Token token; - String currentName = parser.currentName(); - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - if (context.parseFieldMatcher().match(currentName, ScriptField.SCRIPT)) { - script = Script.parse(parser, context.parseFieldMatcher()); - } else if ("params".equals(currentName)) { - params = parser.map(); - } else if ("nested_filter".equals(currentName) || "nestedFilter".equals(currentName)) { - if (nestedHelper == null) { - nestedHelper = new NestedInnerQueryParseSupport(parser, context); - } - nestedHelper.filter(); - } - } else if (token.isValue()) { - if ("reverse".equals(currentName)) { - reverse = parser.booleanValue(); - } else if ("order".equals(currentName)) { - reverse = "desc".equals(parser.text()); - } else if (scriptParameterParser.token(currentName, token, parser, context.parseFieldMatcher())) { - // Do Nothing (handled by ScriptParameterParser - } else if
("type".equals(currentName)) { - type = parser.text(); - } else if ("mode".equals(currentName)) { - sortMode = MultiValueMode.fromString(parser.text()); - } else if ("nested_path".equals(currentName) || "nestedPath".equals(currentName)) { - if (nestedHelper == null) { - nestedHelper = new NestedInnerQueryParseSupport(parser, context); - } - nestedHelper.setPath(parser.text()); - } - } - } - - if (script == null) { // Didn't find anything using the new API so try using the old one instead - ScriptParameterValue scriptValue = scriptParameterParser.getDefaultScriptParameterValue(); - if (scriptValue != null) { - if (params == null) { - params = new HashMap<>(); - } - script = new Script(scriptValue.script(), scriptValue.scriptType(), scriptParameterParser.lang(), params); - } - } else if (params != null) { - throw new SearchParseException(context, "script params must be specified inside script object", parser.getTokenLocation()); - } - - if (script == null) { - throw new SearchParseException(context, "_script sorting requires setting the script to sort by", parser.getTokenLocation()); - } - if (type == null) { - throw new SearchParseException(context, "_script sorting requires setting the type of the script", parser.getTokenLocation()); - } - final SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap()); - - if (STRING_SORT_TYPE.equals(type) && (sortMode == MultiValueMode.SUM || sortMode == MultiValueMode.AVG)) { - throw new SearchParseException(context, "type [string] doesn't support mode [" + sortMode + "]", parser.getTokenLocation()); - } - - if (sortMode == null) { - sortMode = reverse ? MultiValueMode.MAX : MultiValueMode.MIN; - } - - // If nested_path is specified, then wrap the `fieldComparatorSource` in a `NestedFieldComparatorSource` - final Nested nested; - if (nestedHelper != null && nestedHelper.getPath() != null) { - BitSetProducer rootDocumentsFilter = context.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter()); - Query innerDocumentsFilter; - if (nestedHelper.filterFound()) { - // TODO: use queries instead - innerDocumentsFilter = nestedHelper.getInnerFilter(); - } else { - innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); - } - nested = new Nested(rootDocumentsFilter, context.searcher().createNormalizedWeight(innerDocumentsFilter, false)); - } else { - nested = null; - } - - final IndexFieldData.XFieldComparatorSource fieldComparatorSource; - switch (type) { - case STRING_SORT_TYPE: - fieldComparatorSource = new BytesRefFieldComparatorSource(null, null, sortMode, nested) { - LeafSearchScript leafScript; - @Override - protected SortedBinaryDocValues getValues(LeafReaderContext context) throws IOException { - leafScript = searchScript.getLeafSearchScript(context); - final BinaryDocValues values = new BinaryDocValues() { - final BytesRefBuilder spare = new BytesRefBuilder(); - @Override - public BytesRef get(int docID) { - leafScript.setDocument(docID); - spare.copyChars(leafScript.run().toString()); - return spare.get(); - } - }; - return FieldData.singleton(values, null); - } - @Override - protected void setScorer(Scorer scorer) { - leafScript.setScorer(scorer); - } - }; - break; - case NUMBER_SORT_TYPE: - // TODO: should we rather sort missing values last? 
- fieldComparatorSource = new DoubleValuesComparatorSource(null, Double.MAX_VALUE, sortMode, nested) { - LeafSearchScript leafScript; - @Override - protected SortedNumericDoubleValues getValues(LeafReaderContext context) throws IOException { - leafScript = searchScript.getLeafSearchScript(context); - final NumericDoubleValues values = new NumericDoubleValues() { - @Override - public double get(int docID) { - leafScript.setDocument(docID); - return leafScript.runAsDouble(); - } - }; - return FieldData.singleton(values, null); - } - @Override - protected void setScorer(Scorer scorer) { - leafScript.setScorer(scorer); - } - }; - break; - default: - throw new SearchParseException(context, "custom script sort type [" + type + "] not supported", parser.getTokenLocation()); - } - - return new SortField("_script", fieldComparatorSource, reverse); - } -} diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java index da80506dde2..ee6af01c93b 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java @@ -19,36 +19,191 @@ package org.elasticsearch.search.sort; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.join.BitSetProducer; +import org.elasticsearch.action.support.ToXContentToBytes; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; +import org.elasticsearch.index.mapper.object.ObjectMapper; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.query.QueryShardException; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; + +import static java.util.Collections.unmodifiableMap; /** * */ -public abstract class SortBuilder implements ToXContent { +public abstract class SortBuilder<T extends SortBuilder<T>> extends ToXContentToBytes implements NamedWriteable { - @Override - public String toString() { - try { - XContentBuilder builder = XContentFactory.jsonBuilder(); - builder.prettyPrint(); - toXContent(builder, EMPTY_PARAMS); - return builder.string(); - } catch (Exception e) { - throw new ElasticsearchException("Failed to build query", e); - } + protected SortOrder order = SortOrder.ASC; + public static final ParseField ORDER_FIELD = new ParseField("order"); + + private static final Map<String, Parser<?>> PARSERS; + static { + Map<String, Parser<?>> parsers = new HashMap<>(); + parsers.put(ScriptSortBuilder.NAME, ScriptSortBuilder::fromXContent); + parsers.put(GeoDistanceSortBuilder.NAME, GeoDistanceSortBuilder::fromXContent); + parsers.put(GeoDistanceSortBuilder.ALTERNATIVE_NAME, GeoDistanceSortBuilder::fromXContent); + parsers.put(ScoreSortBuilder.NAME,
ScoreSortBuilder::fromXContent); + // FieldSortBuilder gets involved if the user specifies a name that isn't one of these. + PARSERS = unmodifiableMap(parsers); } /** - * The order of sorting. Defaults to {@link SortOrder#ASC}. + * Create a {@link SortField} from this builder. */ - public abstract SortBuilder order(SortOrder order); + protected abstract SortField build(QueryShardContext context) throws IOException; /** - * Sets the value when a field is missing in a doc. Can also be set to _last or - * _first to sort missing last or first respectively. + * Set the order of sorting. */ - public abstract SortBuilder missing(Object missing); + @SuppressWarnings("unchecked") + public T order(SortOrder order) { + Objects.requireNonNull(order, "sort order cannot be null."); + this.order = order; + return (T) this; + } + + /** + * Return the {@link SortOrder} used for this {@link SortBuilder}. + */ + public SortOrder order() { + return this.order; + } + + public static List<SortBuilder<?>> fromXContent(QueryParseContext context) throws IOException { + List<SortBuilder<?>> sortFields = new ArrayList<>(2); + XContentParser parser = context.parser(); + XContentParser.Token token = parser.currentToken(); + if (token == XContentParser.Token.START_ARRAY) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token == XContentParser.Token.START_OBJECT) { + parseCompoundSortField(parser, context, sortFields); + } else if (token == XContentParser.Token.VALUE_STRING) { + String fieldName = parser.text(); + sortFields.add(fieldOrScoreSort(fieldName)); + } else { + throw new IllegalArgumentException("malformed sort format, " + + "within the sort array, an object, or an actual string are allowed"); + } + } + } else if (token == XContentParser.Token.VALUE_STRING) { + String fieldName = parser.text(); + sortFields.add(fieldOrScoreSort(fieldName)); + } else if (token == XContentParser.Token.START_OBJECT) { + parseCompoundSortField(parser, context, sortFields); + } else { + throw new IllegalArgumentException("malformed sort format, either start with array, object, or an actual string"); + } + return sortFields; + } + + private static SortBuilder<?> fieldOrScoreSort(String fieldName) { + if (fieldName.equals(ScoreSortBuilder.NAME)) { + return new ScoreSortBuilder(); + } else { + return new FieldSortBuilder(fieldName); + } + } + + private static void parseCompoundSortField(XContentParser parser, QueryParseContext context, List<SortBuilder<?>> sortFields) + throws IOException { + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String fieldName = parser.currentName(); + token = parser.nextToken(); + if (token == XContentParser.Token.VALUE_STRING) { + SortOrder order = SortOrder.fromString(parser.text()); + sortFields.add(fieldOrScoreSort(fieldName).order(order)); + } else { + if (PARSERS.containsKey(fieldName)) { + sortFields.add(PARSERS.get(fieldName).fromXContent(context, fieldName)); + } else { + sortFields.add(FieldSortBuilder.fromXContent(context, fieldName)); + } + } + } + } + } + + public static void parseSort(XContentParser parser, SearchContext context) throws IOException { + QueryParseContext parseContext = context.getQueryShardContext().parseContext(); + parseContext.reset(parser); + Optional<Sort> sortOptional = buildSort(SortBuilder.fromXContent(parseContext), context.getQueryShardContext()); + if (sortOptional.isPresent()) { + context.sort(sortOptional.get()); + } + } + + public static Optional<Sort> buildSort(List<SortBuilder<?>>
sortBuilders, QueryShardContext context) throws IOException { + List<SortField> sortFields = new ArrayList<>(sortBuilders.size()); + for (SortBuilder<?> builder : sortBuilders) { + sortFields.add(builder.build(context)); + } + if (!sortFields.isEmpty()) { + // optimize if we just sort on score non reversed, we don't really + // need sorting + boolean sort; + if (sortFields.size() > 1) { + sort = true; + } else { + SortField sortField = sortFields.get(0); + if (sortField.getType() == SortField.Type.SCORE && !sortField.getReverse()) { + sort = false; + } else { + sort = true; + } + } + if (sort) { + return Optional.of(new Sort(sortFields.toArray(new SortField[sortFields.size()]))); + } + } + return Optional.empty(); + } + + protected static Nested resolveNested(QueryShardContext context, String nestedPath, QueryBuilder<?> nestedFilter) throws IOException { + Nested nested = null; + if (nestedPath != null) { + BitSetProducer rootDocumentsFilter = context.bitsetFilter(Queries.newNonNestedFilter()); + ObjectMapper nestedObjectMapper = context.getObjectMapper(nestedPath); + if (nestedObjectMapper == null) { + throw new QueryShardException(context, "[nested] failed to find nested object under path [" + nestedPath + "]"); + } + if (!nestedObjectMapper.nested().isNested()) { + throw new QueryShardException(context, "[nested] nested object under path [" + nestedPath + "] is not of nested type"); + } + Query innerDocumentsQuery; + if (nestedFilter != null) { + context.nestedScope().nextLevel(nestedObjectMapper); + innerDocumentsQuery = QueryBuilder.rewriteQuery(nestedFilter, context).toFilter(context); + context.nestedScope().previousLevel(); + } else { + innerDocumentsQuery = nestedObjectMapper.nestedTypeFilter(); + } + nested = new Nested(rootDocumentsFilter, innerDocumentsQuery); + } + return nested; + } + + @FunctionalInterface + private interface Parser<T extends SortBuilder<T>> { + T fromXContent(QueryParseContext context, String elementName) throws IOException; + } } diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortBuilders.java b/core/src/main/java/org/elasticsearch/search/sort/SortBuilders.java index f326fee3837..3eae9b8d019 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/SortBuilders.java +++ b/core/src/main/java/org/elasticsearch/search/sort/SortBuilders.java @@ -21,8 +21,7 @@ package org.elasticsearch.search.sort; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.script.Script; - -import java.util.Arrays; +import org.elasticsearch.search.sort.ScriptSortBuilder.ScriptSortType; /** * A set of static factory methods for {@link SortBuilder}s. @@ -53,7 +52,7 @@ public class SortBuilders { * @param script The script to use. * @param type The type, can either be "string" or "number". */ - public static ScriptSortBuilder scriptSort(Script script, String type) { + public static ScriptSortBuilder scriptSort(Script script, ScriptSortType type) { return new ScriptSortBuilder(script, type); } @@ -63,12 +62,12 @@ public class SortBuilders { * @param fieldName The geo point like field name. * @param lat Latitude of the point to create the range distance facets from. * @param lon Longitude of the point to create the range distance facets from. - * + * */ public static GeoDistanceSortBuilder geoDistanceSort(String fieldName, double lat, double lon) { return new GeoDistanceSortBuilder(fieldName, lat, lon); - } + } /** * Constructs a new distance based sort on a geo point like field.
* @@ -87,5 +86,5 @@ public class SortBuilders { */ public static GeoDistanceSortBuilder geoDistanceSort(String fieldName, String ... geohashes) { return new GeoDistanceSortBuilder(fieldName, geohashes); - } + } } diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortElementParserTemp.java b/core/src/main/java/org/elasticsearch/search/sort/SortElementParserTemp.java deleted file mode 100644 index 8893471b6c1..00000000000 --- a/core/src/main/java/org/elasticsearch/search/sort/SortElementParserTemp.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.search.sort; - -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.index.query.QueryParseContext; - -import java.io.IOException; - -// TODO once sort refactoring is done this needs to be merged into SortBuilder -public interface SortElementParserTemp<T extends ToXContent> { - /** - * Creates a new SortBuilder from the json held by the {@link SortElementParserTemp} - * in {@link org.elasticsearch.common.xcontent.XContent} format - * - * @param context - * the input parse context. The state on the parser contained in - * this context will be changed as a side effect of this method - * call - * @return the new item - */ - T fromXContent(QueryParseContext context, String elementName) throws IOException; -} diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortMode.java b/core/src/main/java/org/elasticsearch/search/sort/SortMode.java new file mode 100644 index 00000000000..c6b3e1b10b4 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/sort/SortMode.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
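The SortMode enum introduced below (like ScriptSortType above) serializes by writing its ordinal as a vInt and validating the range on read, so a corrupted stream fails with an IOException rather than an ArrayIndexOutOfBoundsException. A rough round-trip sketch, not code from this patch; the stream wiring via StreamInput.wrap over the output bytes is an assumption modeled on test utilities of this vintage:

    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.search.sort.SortMode;

    public class SortModeRoundTrip {
        public static void main(String[] args) throws Exception {
            try (BytesStreamOutput out = new BytesStreamOutput()) {
                SortMode.MEDIAN.writeTo(out);                    // writes ordinal() as a vInt
                StreamInput in = StreamInput.wrap(out.bytes());  // assumed helper; later code uses bytes().streamInput()
                SortMode mode = SortMode.readFromStream(in);     // rejects out-of-range ordinals
                assert mode == SortMode.MEDIAN;
            }
        }
    }

Note that SortMode.fromString accepts the lowercase JSON names ("min", "max", "sum", "avg", "median"), mirroring its toString.
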
+ */ + +package org.elasticsearch.search.sort; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Locale; +import java.util.Objects; + +/** + * Elasticsearch supports sorting by array or multi-valued fields. The SortMode option controls what array value is picked + * for sorting the document it belongs to. The mode option can have the following values: + * <ul> + * <li>min - Pick the lowest value.</li> + * <li>max - Pick the highest value.</li> + * <li>sum - Use the sum of all values as sort value. Only applicable for number based array fields.</li> + * <li>avg - Use the average of all values as sort value. Only applicable for number based array fields.</li> + * <li>median - Use the median of all values as sort value. Only applicable for number based array fields.</li> + * </ul>
    + */ +public enum SortMode implements Writeable { + /** pick the lowest value **/ + MIN, + /** pick the highest value **/ + MAX, + /** Use the sum of all values as sort value. Only applicable for number based array fields. **/ + SUM, + /** Use the average of all values as sort value. Only applicable for number based array fields. **/ + AVG, + /** Use the median of all values as sort value. Only applicable for number based array fields. **/ + MEDIAN; + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeVInt(ordinal()); + } + + public static SortMode readFromStream(StreamInput in) throws IOException { + int ordinal = in.readVInt(); + if (ordinal < 0 || ordinal >= values().length) { + throw new IOException("Unknown SortMode ordinal [" + ordinal + "]"); + } + return values()[ordinal]; + } + + public static SortMode fromString(final String str) { + Objects.requireNonNull(str, "input string is null"); + switch (str.toLowerCase(Locale.ROOT)) { + case ("min"): + return MIN; + case ("max"): + return MAX; + case ("sum"): + return SUM; + case ("avg"): + return AVG; + case ("median"): + return MEDIAN; + default: + throw new IllegalArgumentException("Unknown SortMode [" + str + "]"); + } + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } +} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortOrder.java b/core/src/main/java/org/elasticsearch/search/sort/SortOrder.java index 73e5ac55247..a84a456775b 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/SortOrder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/SortOrder.java @@ -50,11 +50,8 @@ public enum SortOrder implements Writeable { return "desc"; } }; - - private static final SortOrder PROTOTYPE = ASC; - @Override - public SortOrder readFrom(StreamInput in) throws IOException { + static SortOrder readFromStream(StreamInput in) throws IOException { int ordinal = in.readVInt(); if (ordinal < 0 || ordinal >= values().length) { throw new IOException("Unknown SortOrder ordinal [" + ordinal + "]"); @@ -62,10 +59,6 @@ public enum SortOrder implements Writeable { return values()[ordinal]; } - public static SortOrder readOrderFrom(StreamInput in) throws IOException { - return PROTOTYPE.readFrom(in); - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(this.ordinal()); diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortParseElement.java b/core/src/main/java/org/elasticsearch/search/sort/SortParseElement.java deleted file mode 100644 index 5349d6fc0d6..00000000000 --- a/core/src/main/java/org/elasticsearch/search/sort/SortParseElement.java +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
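One behavior carries over verbatim from the SortParseElement deleted below into the new SortBuilder.buildSort above: a single _score sort in its default, non-reversed direction produces no Lucene Sort at all, because descending-by-score is already what Lucene returns without one. A standalone restatement of that check using only Lucene types (the wrapper class is hypothetical, not code from this patch):

    import java.util.Arrays;
    import java.util.List;
    import java.util.Optional;

    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.SortField;

    public class ScoreSortShortCircuit {
        static Optional<Sort> buildSort(List<SortField> sortFields) {
            if (sortFields.isEmpty()) {
                return Optional.empty();
            }
            if (sortFields.size() == 1) {
                SortField only = sortFields.get(0);
                // a non-reversed SCORE sort is Lucene's default ordering: skip explicit sorting
                if (only.getType() == SortField.Type.SCORE && !only.getReverse()) {
                    return Optional.empty();
                }
            }
            return Optional.of(new Sort(sortFields.toArray(new SortField[sortFields.size()])));
        }

        public static void main(String[] args) {
            SortField score = new SortField(null, SortField.Type.SCORE);
            System.out.println(buildSort(Arrays.asList(score)).isPresent());                      // false
            System.out.println(buildSort(Arrays.asList(score, SortField.FIELD_DOC)).isPresent()); // true
        }
    }
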
- */ - -package org.elasticsearch.search.sort; - -import org.apache.lucene.search.Query; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.SortField; -import org.apache.lucene.search.join.BitSetProducer; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.fielddata.IndexFieldData; -import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport; -import org.elasticsearch.search.MultiValueMode; -import org.elasticsearch.search.SearchParseElement; -import org.elasticsearch.search.SearchParseException; -import org.elasticsearch.search.internal.SearchContext; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static java.util.Collections.unmodifiableMap; - -/** - * - */ -public class SortParseElement implements SearchParseElement { - - public static final SortField SORT_SCORE = new SortField(null, SortField.Type.SCORE); - private static final SortField SORT_SCORE_REVERSE = new SortField(null, SortField.Type.SCORE, true); - private static final SortField SORT_DOC = new SortField(null, SortField.Type.DOC); - private static final SortField SORT_DOC_REVERSE = new SortField(null, SortField.Type.DOC, true); - - public static final ParseField UNMAPPED_TYPE = new ParseField("unmapped_type"); - - public static final String SCORE_FIELD_NAME = "_score"; - public static final String DOC_FIELD_NAME = "_doc"; - - private static final Map<String, SortParser> PARSERS; - - static { - Map<String, SortParser> parsers = new HashMap<>(); - addParser(parsers, new ScriptSortParser()); - addParser(parsers, new GeoDistanceSortParser()); - PARSERS = unmodifiableMap(parsers); - } - - private static void addParser(Map<String, SortParser> parsers, SortParser parser) { - for (String name : parser.names()) { - parsers.put(name, parser); - } - } - - @Override - public void parse(XContentParser parser, SearchContext context) throws Exception { - XContentParser.Token token = parser.currentToken(); - List<SortField> sortFields = new ArrayList<>(2); - if (token == XContentParser.Token.START_ARRAY) { - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token == XContentParser.Token.START_OBJECT) { - addCompoundSortField(parser, context, sortFields); - } else if (token == XContentParser.Token.VALUE_STRING) { - addSortField(context, sortFields, parser.text(), false, null, null, null, null); - } else { - throw new IllegalArgumentException("malformed sort format, within the sort array, an object, or an actual string are allowed"); - } - } - } else if (token == XContentParser.Token.VALUE_STRING) { - addSortField(context, sortFields, parser.text(), false, null, null, null, null); - } else if (token == XContentParser.Token.START_OBJECT) { - addCompoundSortField(parser, context, sortFields); - } else { - throw new IllegalArgumentException("malformed sort format, either start with array, object, or an actual string"); - } - if (!sortFields.isEmpty()) { - // optimize if we just sort on score non reversed, we don't really need sorting - boolean sort; - if (sortFields.size() > 1) { - sort = true; - } else { - SortField sortField = sortFields.get(0); - if (sortField.getType() == SortField.Type.SCORE && !sortField.getReverse()) { - sort =
false; - } else { - sort = true; - } - if (sort) { - context.sort(new Sort(sortFields.toArray(new SortField[sortFields.size()]))); - } - } - } - - private void addCompoundSortField(XContentParser parser, SearchContext context, List<SortField> sortFields) throws Exception { - XContentParser.Token token; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - String fieldName = parser.currentName(); - boolean reverse = false; - String missing = null; - String innerJsonName = null; - String unmappedType = null; - MultiValueMode sortMode = null; - NestedInnerQueryParseSupport nestedFilterParseHelper = null; - token = parser.nextToken(); - if (token == XContentParser.Token.VALUE_STRING) { - String direction = parser.text(); - if (direction.equals("asc")) { - reverse = SCORE_FIELD_NAME.equals(fieldName); - } else if (direction.equals("desc")) { - reverse = !SCORE_FIELD_NAME.equals(fieldName); - } else { - throw new IllegalArgumentException("sort direction [" + fieldName + "] not supported"); - } - addSortField(context, sortFields, fieldName, reverse, unmappedType, missing, sortMode, nestedFilterParseHelper); - } else { - if (PARSERS.containsKey(fieldName)) { - sortFields.add(PARSERS.get(fieldName).parse(parser, context)); - } else { - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - innerJsonName = parser.currentName(); - } else if (token.isValue()) { - if ("reverse".equals(innerJsonName)) { - reverse = parser.booleanValue(); - } else if ("order".equals(innerJsonName)) { - if ("asc".equals(parser.text())) { - reverse = SCORE_FIELD_NAME.equals(fieldName); - } else if ("desc".equals(parser.text())) { - reverse = !SCORE_FIELD_NAME.equals(fieldName); - } - } else if ("missing".equals(innerJsonName)) { - missing = parser.textOrNull(); - } else if (context.parseFieldMatcher().match(innerJsonName, UNMAPPED_TYPE)) { - unmappedType = parser.textOrNull(); - } else if ("mode".equals(innerJsonName)) { - sortMode = MultiValueMode.fromString(parser.text()); - } else if ("nested_path".equals(innerJsonName) || "nestedPath".equals(innerJsonName)) { - if (nestedFilterParseHelper == null) { - nestedFilterParseHelper = new NestedInnerQueryParseSupport(parser, context); - } - nestedFilterParseHelper.setPath(parser.text()); - } else { - throw new IllegalArgumentException("sort option [" + innerJsonName + "] not supported"); - } - } else if (token == XContentParser.Token.START_OBJECT) { - if ("nested_filter".equals(innerJsonName) || "nestedFilter".equals(innerJsonName)) { - if (nestedFilterParseHelper == null) { - nestedFilterParseHelper = new NestedInnerQueryParseSupport(parser, context); - } - nestedFilterParseHelper.filter(); - } else { - throw new IllegalArgumentException("sort option [" + innerJsonName + "] not supported"); - } - } - } - addSortField(context, sortFields, fieldName, reverse, unmappedType, missing, sortMode, nestedFilterParseHelper); - } - } - } - } - } - - private void addSortField(SearchContext context, List<SortField> sortFields, String fieldName, boolean reverse, String unmappedType, @Nullable final String missing, MultiValueMode sortMode, NestedInnerQueryParseSupport nestedHelper) throws IOException { - if (SCORE_FIELD_NAME.equals(fieldName)) { - if (reverse) { - sortFields.add(SORT_SCORE_REVERSE); - } else { - sortFields.add(SORT_SCORE); - } - } else if (DOC_FIELD_NAME.equals(fieldName)) { - if (reverse) { - sortFields.add(SORT_DOC_REVERSE); - } else { -
sortFields.add(SORT_DOC); - } - } else { - MappedFieldType fieldType = context.smartNameFieldType(fieldName); - if (fieldType == null) { - if (unmappedType != null) { - fieldType = context.mapperService().unmappedFieldType(unmappedType); - } else { - throw new SearchParseException(context, "No mapping found for [" + fieldName + "] in order to sort on", null); - } - } - - if (!fieldType.isSortable()) { - throw new SearchParseException(context, "Sorting not supported for field[" + fieldName + "]", null); - } - - // Enable when we also know how to detect fields that do tokenize, but only emit one token - /*if (fieldMapper instanceof StringFieldMapper) { - StringFieldMapper stringFieldMapper = (StringFieldMapper) fieldMapper; - if (stringFieldMapper.fieldType().tokenized()) { - // Fail early - throw new SearchParseException(context, "Can't sort on tokenized string field[" + fieldName + "]"); - } - }*/ - - // We only support AVG and SUM on number based fields - if (fieldType.isNumeric() == false && (sortMode == MultiValueMode.SUM || sortMode == MultiValueMode.AVG)) { - sortMode = null; - } - if (sortMode == null) { - sortMode = resolveDefaultSortMode(reverse); - } - - final Nested nested; - if (nestedHelper != null && nestedHelper.getPath() != null) { - BitSetProducer rootDocumentsFilter = context.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter()); - Query innerDocumentsFilter; - if (nestedHelper.filterFound()) { - // TODO: use queries instead - innerDocumentsFilter = nestedHelper.getInnerFilter(); - } else { - innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter(); - } - nested = new Nested(rootDocumentsFilter, context.searcher().createNormalizedWeight(innerDocumentsFilter, false)); - } else { - nested = null; - } - - IndexFieldData.XFieldComparatorSource fieldComparatorSource = context.fieldData().getForField(fieldType) - .comparatorSource(missing, sortMode, nested); - sortFields.add(new SortField(fieldType.name(), fieldComparatorSource, reverse)); - } - } - - private static MultiValueMode resolveDefaultSortMode(boolean reverse) { - return reverse ? 
MultiValueMode.MAX : MultiValueMode.MIN; - } - -} diff --git a/core/src/main/java/org/elasticsearch/search/suggest/DirectSpellcheckerSettings.java b/core/src/main/java/org/elasticsearch/search/suggest/DirectSpellcheckerSettings.java index 2b4687c8497..81c73df53fa 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/DirectSpellcheckerSettings.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/DirectSpellcheckerSettings.java @@ -25,16 +25,29 @@ import org.apache.lucene.util.automaton.LevenshteinAutomata; public class DirectSpellcheckerSettings { - private SuggestMode suggestMode = SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX; - private float accuracy = 0.5f; - private Suggest.Suggestion.Sort sort = Suggest.Suggestion.Sort.SCORE; - private StringDistance stringDistance = DirectSpellChecker.INTERNAL_LEVENSHTEIN; - private int maxEdits = LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE; - private int maxInspections = 5; - private float maxTermFreq = 0.01f; - private int prefixLength = 1; - private int minWordLength = 4; - private float minDocFreq = 0f; + // NB: If this changes, make sure to change the default in TermBuilderSuggester + public static SuggestMode DEFAULT_SUGGEST_MODE = SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX; + public static float DEFAULT_ACCURACY = 0.5f; + public static SortBy DEFAULT_SORT = SortBy.SCORE; + // NB: If this changes, make sure to change the default in TermBuilderSuggester + public static StringDistance DEFAULT_STRING_DISTANCE = DirectSpellChecker.INTERNAL_LEVENSHTEIN; + public static int DEFAULT_MAX_EDITS = LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE; + public static int DEFAULT_MAX_INSPECTIONS = 5; + public static float DEFAULT_MAX_TERM_FREQ = 0.01f; + public static int DEFAULT_PREFIX_LENGTH = 1; + public static int DEFAULT_MIN_WORD_LENGTH = 4; + public static float DEFAULT_MIN_DOC_FREQ = 0f; + + private SuggestMode suggestMode = DEFAULT_SUGGEST_MODE; + private float accuracy = DEFAULT_ACCURACY; + private SortBy sort = DEFAULT_SORT; + private StringDistance stringDistance = DEFAULT_STRING_DISTANCE; + private int maxEdits = DEFAULT_MAX_EDITS; + private int maxInspections = DEFAULT_MAX_INSPECTIONS; + private float maxTermFreq = DEFAULT_MAX_TERM_FREQ; + private int prefixLength = DEFAULT_PREFIX_LENGTH; + private int minWordLength = DEFAULT_MIN_WORD_LENGTH; + private float minDocFreq = DEFAULT_MIN_DOC_FREQ; public SuggestMode suggestMode() { return suggestMode; @@ -52,11 +65,11 @@ public class DirectSpellcheckerSettings { this.accuracy = accuracy; } - public Suggest.Suggestion.Sort sort() { + public SortBy sort() { return sort; } - public void sort(Suggest.Suggestion.Sort sort) { + public void sort(SortBy sort) { this.sort = sort; } @@ -104,8 +117,8 @@ public class DirectSpellcheckerSettings { return minWordLength; } - public void minQueryLength(int minQueryLength) { - this.minWordLength = minQueryLength; + public void minWordLength(int minWordLength) { + this.minWordLength = minWordLength; } public float minDocFreq() { @@ -116,4 +129,20 @@ public class DirectSpellcheckerSettings { this.minDocFreq = minDocFreq; } -} \ No newline at end of file + @Override + public String toString() { + return "[" + + "suggestMode=" + suggestMode + + ",sort=" + sort + + ",stringDistance=" + stringDistance + + ",accuracy=" + accuracy + + ",maxEdits=" + maxEdits + + ",maxInspections=" + maxInspections + + ",maxTermFreq=" + maxTermFreq + + ",prefixLength=" + prefixLength + + ",minWordLength=" + minWordLength + + ",minDocFreq=" + minDocFreq + + "]"; + } + +} diff --git 
a/core/src/main/java/org/elasticsearch/search/suggest/SortBy.java b/core/src/main/java/org/elasticsearch/search/suggest/SortBy.java new file mode 100644 index 00000000000..14d46d134de --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/suggest/SortBy.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.suggest; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Locale; +import java.util.Objects; + +/** + * An enum representing the valid sorting options + */ +public enum SortBy implements Writeable<SortBy> { + /** Sort should first be based on score, then document frequency and then the term itself. */ + SCORE, + /** Sort should first be based on document frequency, then score and then the term itself. */ + FREQUENCY; + + public static SortBy PROTOTYPE = SCORE; + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeVInt(ordinal()); + } + + @Override + public SortBy readFrom(final StreamInput in) throws IOException { + int ordinal = in.readVInt(); + if (ordinal < 0 || ordinal >= values().length) { + throw new IOException("Unknown SortBy ordinal [" + ordinal + "]"); + } + return values()[ordinal]; + } + + public static SortBy resolve(final String str) { + Objects.requireNonNull(str, "Input string is null"); + return valueOf(str.toUpperCase(Locale.ROOT)); + } +} diff --git a/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java b/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java index 5ec92264389..f9c7092fbf1 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/Suggest.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.suggest; import org.apache.lucene.util.CollectionUtil; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -47,9 +46,7 @@ import java.util.Map; */ public class Suggest implements Iterable<Suggest.Suggestion<? extends Entry<? extends Option>>>, Streamable, ToXContent { - public static class Fields { - public static final XContentBuilderString SUGGEST = new XContentBuilderString("suggest"); - } + private static final XContentBuilderString NAME = new XContentBuilderString("suggest"); private static final Comparator